Nov 25 09:03:23 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 25 09:03:23 crc restorecon[4681]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 09:03:23 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:03:24 crc restorecon[4681]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:03:24 crc 
restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:03:24 crc 
restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 
09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:03:24 crc 
restorecon[4681]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 
09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:03:24 crc restorecon[4681]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:03:24 crc restorecon[4681]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 25 09:03:25 crc kubenswrapper[4687]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 25 09:03:25 crc kubenswrapper[4687]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 25 09:03:25 crc kubenswrapper[4687]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 25 09:03:25 crc kubenswrapper[4687]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Nov 25 09:03:25 crc kubenswrapper[4687]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Nov 25 09:03:25 crc kubenswrapper[4687]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.435001 4687 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457189 4687 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457220 4687 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457230 4687 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457239 4687 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457249 4687 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457258 4687 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457266 4687 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457275 4687 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457283 4687 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457294 4687 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457305 4687 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457314 4687 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457322 4687 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457332 4687 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457340 4687 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457349 4687 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457359 4687 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457367 4687 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457375 4687 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457383 4687 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457394 4687 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457403 4687 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457411 4687 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457420 4687 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457431 4687 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457440 4687 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457449 4687 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457457 4687 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457464 4687 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457472 4687 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457480 4687 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457488 4687 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457495 4687 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457551 4687 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457559 4687 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457567 4687 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457579 4687 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457587 4687 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457594 4687 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457602 4687 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457611 4687 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457618 4687 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457626 4687 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457634 4687 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457641 4687 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457649 4687 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457656 4687 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457664 4687 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457672 4687 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457679 4687 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457704 4687 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457712 4687 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457721 4687 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457730 4687 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457737 4687 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457745 4687 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457753 4687 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457761 4687 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457769 4687 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457776 4687 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457784 4687 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457791 4687 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457799 4687 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457808 4687 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457817 4687 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457825 4687 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457832 4687 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457842 4687 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457852 4687 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457860 4687 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.457869 4687 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460360 4687 flags.go:64] FLAG: --address="0.0.0.0"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460384 4687 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460398 4687 flags.go:64] FLAG: --anonymous-auth="true"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460410 4687 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460439 4687 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460449 4687 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460461 4687 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460474 4687 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460484 4687 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460493 4687 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460533 4687 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460542 4687 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460552 4687 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460561 4687 flags.go:64] FLAG: --cgroup-root=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460570 4687 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460579 4687 flags.go:64] FLAG: --client-ca-file=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460589 4687 flags.go:64] FLAG: --cloud-config=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460598 4687 flags.go:64] FLAG: --cloud-provider=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460607 4687 flags.go:64] FLAG: --cluster-dns="[]"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460619 4687 flags.go:64] FLAG: --cluster-domain=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460627 4687 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460637 4687 flags.go:64] FLAG: --config-dir=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460645 4687 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460655 4687 flags.go:64] FLAG: --container-log-max-files="5"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460666 4687 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460675 4687 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460685 4687 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460694 4687 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460704 4687 flags.go:64] FLAG: --contention-profiling="false"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460714 4687 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460723 4687 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460733 4687 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460742 4687 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460753 4687 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460762 4687 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460772 4687 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460781 4687 flags.go:64] FLAG: --enable-load-reader="false"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460791 4687 flags.go:64] FLAG: --enable-server="true"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460800 4687 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460811 4687 flags.go:64] FLAG: --event-burst="100"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460820 4687 flags.go:64] FLAG: --event-qps="50"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460829 4687 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460838 4687 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460848 4687 flags.go:64] FLAG: --eviction-hard=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460859 4687 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460868 4687 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460877 4687 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460886 4687 flags.go:64] FLAG: --eviction-soft=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460895 4687 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460904 4687 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460913 4687 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460923 4687 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460932 4687 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460941 4687 flags.go:64] FLAG: --fail-swap-on="true"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460950 4687 flags.go:64] FLAG: --feature-gates=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460960 4687 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460969 4687 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460979 4687 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460988 4687 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.460998 4687 flags.go:64] FLAG: --healthz-port="10248"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461007 4687 flags.go:64] FLAG: --help="false"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461018 4687 flags.go:64] FLAG: --hostname-override=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461027 4687 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461036 4687 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461045 4687 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461054 4687 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461063 4687 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461072 4687 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461081 4687 flags.go:64] FLAG: --image-service-endpoint=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461089 4687 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461099 4687 flags.go:64] FLAG: --kube-api-burst="100"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461108 4687 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461117 4687 flags.go:64] FLAG: --kube-api-qps="50"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461126 4687 flags.go:64] FLAG: --kube-reserved=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461135 4687 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461164 4687 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461174 4687 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461182 4687 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461191 4687 flags.go:64] FLAG: --lock-file=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461201 4687 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461210 4687 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461228 4687 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461241 4687 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461250 4687 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461259 4687 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461267 4687 flags.go:64] FLAG: --logging-format="text"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461276 4687 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461287 4687 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461296 4687 flags.go:64] FLAG: --manifest-url=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461306 4687 flags.go:64] FLAG: --manifest-url-header=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461322 4687 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461332 4687 flags.go:64] FLAG: --max-open-files="1000000"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461342 4687 flags.go:64] FLAG: --max-pods="110"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461353 4687 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461362 4687 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461371 4687 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461380 4687 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461390 4687 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461399 4687 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461408 4687 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461429 4687 flags.go:64] FLAG: --node-status-max-images="50"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461439 4687 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461448 4687 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461457 4687 flags.go:64] FLAG: --pod-cidr=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461466 4687 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461480 4687 flags.go:64] FLAG: --pod-manifest-path=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461490 4687 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461520 4687 flags.go:64] FLAG: --pods-per-core="0"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461530 4687 flags.go:64] FLAG: --port="10250"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461539 4687 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461548 4687 flags.go:64] FLAG: --provider-id=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461557 4687 flags.go:64] FLAG: --qos-reserved=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461566 4687 flags.go:64] FLAG: --read-only-port="10255"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461575 4687 flags.go:64] FLAG: --register-node="true"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461584 4687 flags.go:64] FLAG: --register-schedulable="true"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461598 4687 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461613 4687 flags.go:64] FLAG: --registry-burst="10"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461622 4687 flags.go:64] FLAG: --registry-qps="5"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461630 4687 flags.go:64] FLAG: --reserved-cpus=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461639 4687 flags.go:64] FLAG: --reserved-memory=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461650 4687 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461659 4687 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461668 4687 flags.go:64] FLAG: --rotate-certificates="false"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461677 4687 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461686 4687 flags.go:64] FLAG: --runonce="false"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461695 4687 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461704 4687 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461714 4687 flags.go:64] FLAG: --seccomp-default="false"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461723 4687 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461731 4687 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461742 4687 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461751 4687 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461760 4687 flags.go:64] FLAG: --storage-driver-password="root"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461770 4687 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461779 4687 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461787 4687 flags.go:64] FLAG: --storage-driver-user="root"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461796 4687 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461806 4687 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461815 4687 flags.go:64] FLAG: --system-cgroups=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461824 4687 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461837 4687 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461846 4687 flags.go:64] FLAG: --tls-cert-file=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461855 4687 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461867 4687 flags.go:64] FLAG: --tls-min-version=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461876 4687 flags.go:64] FLAG: --tls-private-key-file=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461884 4687 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461893 4687 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461902 4687 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461911 4687 flags.go:64] FLAG: --v="2"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461922 4687 flags.go:64] FLAG: --version="false"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461933 4687 flags.go:64] FLAG: --vmodule=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461945 4687 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.461955 4687 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462162 4687 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462174 4687 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462184 4687 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462193 4687 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462204 4687 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462213 4687 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462222 4687 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462230 4687 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462239 4687 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462247 4687 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462256 4687 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462264 4687 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462271 4687 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462279 4687 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462287 4687 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462295 4687 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462303 4687 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462310 4687 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462318 4687 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462325 4687 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462333 4687 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462341 4687 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462349 4687 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462357 4687 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462367 4687 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462377 4687 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462385 4687 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462395 4687 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462405 4687 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462413 4687 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462421 4687 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462430 4687 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462439 4687 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462447 4687 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462456 4687 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462464 4687 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462473 4687 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462482 4687 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462490 4687 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462497 4687 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462528 4687 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462536 4687 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462544 4687 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462552 4687 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462560 4687 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462567 4687 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462575 4687 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462583 4687 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462590 4687 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462598 4687 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462605 4687 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462613 4687 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462621 4687 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462629 4687 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462637 4687 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462644 4687 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462652 4687 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462660 4687 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462667 4687 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462675 4687 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462683 4687 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462719 4687 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462729 4687 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462738 4687 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462747 4687 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462755 4687 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462764 4687 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462772 4687 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462786 4687 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462797 4687 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.462807 4687 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.463660 4687 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.476119 4687 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.476160 4687 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476280 4687 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476292 4687 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476301 4687 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476310 4687 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476319 4687 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476328 4687 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476337 4687 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476346 4687 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476355 4687 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476363 4687 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476372 4687 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476380 4687 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476388 4687 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476396 4687 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476403 4687 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476412 4687 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476420 4687 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476427 4687 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476435 4687 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476443 4687 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476451 4687 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476459 4687 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476466 4687 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476475 4687 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476483 4687 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476490 4687 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476526 4687 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476536 4687 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476545 4687 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476554 4687 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476564 4687 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476573 4687 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476583 4687 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476592 4687 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476603 4687 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476612 4687 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476620 4687 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476628 4687 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476637 4687 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476644 4687 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476653 4687 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476661 4687 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476668 4687 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476676 4687 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476684 4687 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476692 4687 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476699 4687 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476707 4687 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476714 4687 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476722 4687 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476731 4687 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476739 4687 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476746 4687 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476754 4687 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476764 4687 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476775 4687 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476784 4687 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476793 4687 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476803 4687 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476814 4687 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476824 4687 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476833 4687 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476842 4687 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476852 4687 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476862 4687 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476870 4687 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476878 4687 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476886 4687 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476893 4687 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476901 4687 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.476910 4687 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.476924 4687 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477159 4687 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477174 4687 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477182 4687 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477192 4687 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477200 4687 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477208 4687 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477216 4687 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477223 4687 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477231 4687 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477240 4687 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477247 4687 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477255 4687 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477263 4687 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477270 4687 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477278 4687 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477288 4687 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477300 4687 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477309 4687 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477318 4687 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477327 4687 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477336 4687 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477345 4687 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477353 4687 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477361 4687 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477368 4687 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477376 4687 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477384 4687 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477391 4687 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477399 4687 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477406 4687 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477417 4687 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477426 4687 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477435 4687 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477443 4687 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477453 4687 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477462 4687 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477471 4687 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477480 4687 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477488 4687 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477496 4687 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477528 4687 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477536 4687 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477544 4687 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477552 4687 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477560 4687 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477568 4687 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477576 4687 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477583 4687 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477591 4687 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477599 4687 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477607 4687 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477615 4687 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477622 4687 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477631 4687 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477641 4687 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477650 4687 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477659 4687 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477667 4687 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477675 4687 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477682 4687 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477690 4687 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477698 4687 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477705 4687 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477713 4687 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477720 4687 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477728 4687 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477735 4687 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477743 4687 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477751 4687 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477758 4687 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.477768 4687 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.477778 4687 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.479004 4687 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.484348 4687 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.484481 4687 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.486364 4687 server.go:997] "Starting client certificate rotation"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.486417 4687 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.486609 4687 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-27 10:37:05.437164693 +0000 UTC
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.486690 4687 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 769h33m39.950477644s for next certificate rotation
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.551962 4687 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.554913 4687 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.572993 4687 log.go:25] "Validated CRI v1 runtime API"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.614870 4687 log.go:25] "Validated CRI v1 image API"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.616842 4687 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.623056 4687 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-25-08-58-28-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.623216 4687 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.647555 4687 manager.go:217] Machine: {Timestamp:2025-11-25 09:03:25.644450712 +0000 UTC m=+0.698090460 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654124544 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:3d8948be-6f27-4904-9fe7-1878681451c2 BootID:59433806-9cd9-44e7-8e27-d03eb8a2fcda Filesystems:[{Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:d0:46:6d Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:d0:46:6d Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:c5:d3:e4 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:f8:50:26 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:f8:1c:6d Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:7f:0d:a9 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:9a:92:0e:0b:4a:3f Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:06:54:58:9d:c9:ed Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654124544 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.647843 4687 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.648013 4687 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.649278 4687 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.649538 4687 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.649581 4687 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.652691 4687 topology_manager.go:138] "Creating topology manager with none policy"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.652766 4687 container_manager_linux.go:303] "Creating device plugin manager"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.653206 4687 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.653253 4687 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.653775 4687 state_mem.go:36] "Initialized new in-memory state store"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.653947 4687 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.658659 4687 kubelet.go:418] "Attempting to sync node with API server"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.658704 4687 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.658735 4687 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.658759 4687 kubelet.go:324] "Adding apiserver pod source"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.658813 4687 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.663731 4687 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.664828 4687 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.667375 4687 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.667910 4687 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.246:6443: connect: connection refused
Nov 25 09:03:25 crc kubenswrapper[4687]: E1125 09:03:25.668276 4687 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.246:6443: connect: connection refused" logger="UnhandledError"
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.667924 4687 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.246:6443: connect: connection refused
Nov 25 09:03:25 crc kubenswrapper[4687]: E1125 09:03:25.668669 4687 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.246:6443: connect: connection refused" logger="UnhandledError"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.669557 4687 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.669588 4687 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.669598 4687 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.669608 4687 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.669623 4687 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.669632 4687 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.669641 4687 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.669655 4687 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.669667 4687 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.669677 4687 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.669698 4687 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.669707 4687 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.671881 4687 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.672332 4687 server.go:1280] "Started kubelet"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.673476 4687 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.673473 4687 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.674165 4687 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 25 09:03:25 crc systemd[1]: Started Kubernetes Kubelet.
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.675441 4687 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.246:6443: connect: connection refused
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.675871 4687 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.675931 4687 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.676143 4687 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 00:34:18.204038096 +0000 UTC
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.676201 4687 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 1071h30m52.527841476s for next certificate rotation
Nov 25 09:03:25 crc kubenswrapper[4687]: E1125 09:03:25.681652 4687 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.681812 4687 server.go:460] "Adding debug handlers to kubelet server"
Nov 25 09:03:25 crc kubenswrapper[4687]: E1125 09:03:25.683230 4687 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.246:6443: connect: connection refused" interval="200ms"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.683776 4687 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.683964 4687 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.684413 4687 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.685009 4687 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.246:6443: connect: connection refused
Nov 25 09:03:25 crc kubenswrapper[4687]: E1125 09:03:25.685711 4687 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.246:6443: connect: connection refused" logger="UnhandledError"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.691700 4687 factory.go:55] Registering systemd factory
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.692630 4687 factory.go:221] Registration of the systemd container factory successfully
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.693755 4687 factory.go:153] Registering CRI-O factory
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.693793 4687 factory.go:221] Registration of the crio container factory successfully
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.693924 4687 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.693990 4687 factory.go:103] Registering Raw factory
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.694025 4687 manager.go:1196] Started watching for new ooms in manager
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.695290 4687 manager.go:319] Starting recovery of all containers
Nov 25 09:03:25 crc kubenswrapper[4687]: E1125 09:03:25.692952 4687 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.246:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b34808712c77d default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 09:03:25.672294269 +0000 UTC m=+0.725933987,LastTimestamp:2025-11-25 09:03:25.672294269 +0000 UTC m=+0.725933987,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696018 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696107 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696122 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696136 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696148 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696159 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696171 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696182 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696195 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696207 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696218 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696230 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696243 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696256 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696267 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696311 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696324 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696335 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696347 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696360 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696373 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696399 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696411 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696425 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696458 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696472 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696487 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696518 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696532 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696543 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696556 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696570 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696582 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696594 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696606 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696619 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696631 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696682 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696698 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696712 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696727 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696738 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696750 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696762 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696774 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696785 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696796 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696809 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696820 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696832 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696858 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696871 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696888 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696902 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696915 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696930 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696943 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696958 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696970 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696984 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.696997 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697009 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697022 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697037 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697050 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697063 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697075 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697088 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697101 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697114 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697126 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697140 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697158 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697170 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697182 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697193 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697205 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697219 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697229 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697243 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697254 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697265 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697282 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697296 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697308 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697324 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697336 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697348 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697359 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697370 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697381 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697394 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697405 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697418 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697429 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697443 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697454 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697466 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697478 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697490 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697551 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697567 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697579 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697590 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697609 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697624 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697638 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697650 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697664 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697678 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697689 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697705 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697717 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697732 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697748 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697760 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697774 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697787 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697800 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697812 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697824 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697837 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697848 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697862 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697875 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697887 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697898 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697911 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697923 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697934 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697945 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697956 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697970 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697982 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.697993 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698005 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698016 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698028 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698040 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698052 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod=""
podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698066 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698080 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698091 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698103 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698115 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698128 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698144 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698153 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698164 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698176 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698187 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698199 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698210 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698222 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698232 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698243 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698255 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698271 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698282 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698294 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698304 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698316 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" 
volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698327 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698340 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.698361 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700443 4687 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700466 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700478 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700489 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700513 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700523 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700533 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700541 4687 reconstruct.go:130] "Volume is marked as uncertain and added into 
the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700549 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700558 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700569 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700579 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700589 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700600 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700610 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700618 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700627 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700636 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700645 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" 
volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700654 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700663 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700673 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700683 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700692 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700706 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700715 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700725 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700735 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700744 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700753 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" 
volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700765 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700775 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700785 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700795 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700804 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700814 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700824 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700833 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700844 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700854 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700865 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" 
volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700874 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700885 4687 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700893 4687 reconstruct.go:97] "Volume reconstruction finished" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.700901 4687 reconciler.go:26] "Reconciler: start to sync state" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.722315 4687 manager.go:324] Recovery completed Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.730225 4687 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.733349 4687 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.733420 4687 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.733461 4687 kubelet.go:2335] "Starting kubelet main sync loop" Nov 25 09:03:25 crc kubenswrapper[4687]: E1125 09:03:25.733578 4687 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 25 09:03:25 crc kubenswrapper[4687]: W1125 09:03:25.735419 4687 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.246:6443: connect: connection refused Nov 25 09:03:25 crc kubenswrapper[4687]: E1125 09:03:25.735532 4687 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.246:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.739460 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.741063 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.741130 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.741145 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.741894 4687 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.741919 4687 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 25 
09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.741941 4687 state_mem.go:36] "Initialized new in-memory state store" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.764972 4687 policy_none.go:49] "None policy: Start" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.766754 4687 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.766800 4687 state_mem.go:35] "Initializing new in-memory state store" Nov 25 09:03:25 crc kubenswrapper[4687]: E1125 09:03:25.782661 4687 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.824018 4687 manager.go:334] "Starting Device Plugin manager" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.824057 4687 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.824068 4687 server.go:79] "Starting device plugin registration server" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.824415 4687 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.824432 4687 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.825019 4687 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.825104 4687 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.825114 4687 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.834487 4687 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.834580 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:25 crc kubenswrapper[4687]: E1125 09:03:25.834846 4687 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.835740 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.835764 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.835772 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.835920 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.836188 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.836233 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.836616 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.836642 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.836654 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.836742 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.836862 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.836925 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.837565 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.837592 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.837603 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.837749 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.837986 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.838029 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.838215 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.838252 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.838276 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.838361 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.838388 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.838397 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.838764 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.838790 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.838803 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.839016 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.839033 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.839041 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.839168 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.839685 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.839726 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.840266 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.840293 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.840306 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.840577 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.840609 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.840622 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.840766 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.840792 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.841277 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.841305 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.841315 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:25 crc kubenswrapper[4687]: E1125 09:03:25.884304 4687 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.246:6443: connect: connection refused" interval="400ms" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.904469 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.904537 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.904558 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: 
\"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.904608 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.904658 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.904696 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.904718 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.904744 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.904772 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.904808 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.904836 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.904853 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: 
\"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.904870 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.904900 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.904931 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.924888 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.926567 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.926782 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.926937 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:25 crc kubenswrapper[4687]: I1125 09:03:25.927097 4687 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:03:25 crc kubenswrapper[4687]: E1125 09:03:25.927858 4687 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.246:6443: connect: connection refused" node="crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.005795 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.005849 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.005879 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.005904 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: 
\"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.005928 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.005952 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.005973 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.006014 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.006037 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.006057 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.006077 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.006096 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.006117 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " 
pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.006136 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.006155 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.006656 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.006716 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.006782 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.006651 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.006812 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.006705 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.006859 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.006828 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.006909 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.006915 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.006667 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.006964 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.007005 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.007030 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.007030 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.128869 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.134993 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.135108 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.135600 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.135662 4687 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:03:26 crc kubenswrapper[4687]: E1125 09:03:26.136168 4687 kubelet_node_status.go:99] "Unable to register node with API server" 
err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.246:6443: connect: connection refused" node="crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.168732 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.188036 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.209603 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.221764 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.225086 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:03:26 crc kubenswrapper[4687]: W1125 09:03:26.242041 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-56c7bc87f4fd93b0ab6f85baab890b978b3399633b9611f5cae95c9eac4b2e94 WatchSource:0}: Error finding container 56c7bc87f4fd93b0ab6f85baab890b978b3399633b9611f5cae95c9eac4b2e94: Status 404 returned error can't find the container with id 56c7bc87f4fd93b0ab6f85baab890b978b3399633b9611f5cae95c9eac4b2e94 Nov 25 09:03:26 crc kubenswrapper[4687]: W1125 09:03:26.243040 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-ae975796676e8a450e7695ae6f25744b5855bc930baaad34b5a8372b86901608 WatchSource:0}: Error finding container ae975796676e8a450e7695ae6f25744b5855bc930baaad34b5a8372b86901608: Status 404 returned error can't find the container with id ae975796676e8a450e7695ae6f25744b5855bc930baaad34b5a8372b86901608 Nov 25 09:03:26 crc kubenswrapper[4687]: W1125 09:03:26.254099 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-da097db22f18987eef29512664652c33154206c19a829562d252f5ccbc6e3fa2 WatchSource:0}: Error finding container da097db22f18987eef29512664652c33154206c19a829562d252f5ccbc6e3fa2: Status 404 returned error can't find the container with id da097db22f18987eef29512664652c33154206c19a829562d252f5ccbc6e3fa2 Nov 25 09:03:26 crc kubenswrapper[4687]: W1125 09:03:26.261871 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-e5761f54f8f7ba2d89e8ce3de8c0a7567a907a83bf257e354220ed1fd134c018 WatchSource:0}: Error finding container e5761f54f8f7ba2d89e8ce3de8c0a7567a907a83bf257e354220ed1fd134c018: Status 404 returned error can't find the container with id e5761f54f8f7ba2d89e8ce3de8c0a7567a907a83bf257e354220ed1fd134c018 Nov 25 09:03:26 crc kubenswrapper[4687]: E1125 09:03:26.285806 4687 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.246:6443: connect: connection refused" 
interval="800ms" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.536309 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.537776 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.537813 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.537822 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.537848 4687 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:03:26 crc kubenswrapper[4687]: E1125 09:03:26.538283 4687 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.246:6443: connect: connection refused" node="crc" Nov 25 09:03:26 crc kubenswrapper[4687]: W1125 09:03:26.595345 4687 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.246:6443: connect: connection refused Nov 25 09:03:26 crc kubenswrapper[4687]: E1125 09:03:26.595452 4687 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.246:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:03:26 crc kubenswrapper[4687]: W1125 09:03:26.645461 4687 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.246:6443: connect: connection refused Nov 25 09:03:26 crc kubenswrapper[4687]: E1125 09:03:26.645552 4687 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.246:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.676592 4687 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.246:6443: connect: connection refused Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.741803 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"56c7bc87f4fd93b0ab6f85baab890b978b3399633b9611f5cae95c9eac4b2e94"} Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.744949 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"e5761f54f8f7ba2d89e8ce3de8c0a7567a907a83bf257e354220ed1fd134c018"} Nov 
25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.746041 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"da097db22f18987eef29512664652c33154206c19a829562d252f5ccbc6e3fa2"}
Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.747456 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"280be63b77336b454ff4b2bc28ae565edad9dfae5dcf7d1b093030e70ec3a805"}
Nov 25 09:03:26 crc kubenswrapper[4687]: I1125 09:03:26.748251 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"ae975796676e8a450e7695ae6f25744b5855bc930baaad34b5a8372b86901608"}
Nov 25 09:03:27 crc kubenswrapper[4687]: E1125 09:03:27.086904 4687 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.246:6443: connect: connection refused" interval="1.6s"
Nov 25 09:03:27 crc kubenswrapper[4687]: W1125 09:03:27.096440 4687 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.246:6443: connect: connection refused
Nov 25 09:03:27 crc kubenswrapper[4687]: E1125 09:03:27.096562 4687 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.246:6443: connect: connection refused" logger="UnhandledError"
Nov 25 09:03:27 crc kubenswrapper[4687]: W1125 09:03:27.266727 4687 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.246:6443: connect: connection refused
Nov 25 09:03:27 crc kubenswrapper[4687]: E1125 09:03:27.266859 4687 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.246:6443: connect: connection refused" logger="UnhandledError"
Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.338918 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.340475 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.340532 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.340546 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.340572 4687 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 25 09:03:27 crc kubenswrapper[4687]: E1125 09:03:27.341011 4687 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.246:6443: connect: connection refused" node="crc"
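Note the retry interval on the lease failures: the controller backs off by doubling, 800ms above, 1.6s here, then 3.2s and 6.4s further down, while every attempt dies with connection refused because the apiserver it needs is one of the static pods the kubelet is still starting. A rough Go sketch of that doubling retry loop (hypothetical ensureLease helper; the real kubelet drives this through client-go, not raw HTTP):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// ensureLease mimics the doubling retry visible in the log:
// 800ms -> 1.6s -> 3.2s -> 6.4s between attempts. Sketch only.
func ensureLease(url string, attempts int) error {
	interval := 800 * time.Millisecond
	client := &http.Client{Timeout: 10 * time.Second}
	for i := 0; i < attempts; i++ {
		resp, err := client.Get(url)
		if err == nil {
			resp.Body.Close()
			return nil
		}
		fmt.Printf("Failed to ensure lease exists, will retry: %v interval=%s\n", err, interval)
		time.Sleep(interval)
		interval *= 2
	}
	return fmt.Errorf("lease not ensured after %d attempts", attempts)
}

func main() {
	_ = ensureLease("https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s", 5)
}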
Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.677088 4687 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.246:6443: connect: connection refused
Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.754037 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8"}
Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.754088 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c"}
Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.754102 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8"}
Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.754113 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3"}
Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.754155 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.755715 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.755765 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.755977 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.757687 4687 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73" exitCode=0
Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.757761 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73"}
Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.757996 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.759120 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.759160 4687
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.759172 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.760573 4687 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="4915aa39d3cae307d0bbdcec35149536659bff2d627bbeb3b3efcf4309ed922c" exitCode=0 Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.760628 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"4915aa39d3cae307d0bbdcec35149536659bff2d627bbeb3b3efcf4309ed922c"} Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.760787 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.761136 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.761986 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.762035 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.762052 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.763110 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.763154 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.763171 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.763247 4687 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="84c73656043e620a3ebc7a9dad78446f135c8c5309492451264d79fc29a0870b" exitCode=0 Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.763277 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"84c73656043e620a3ebc7a9dad78446f135c8c5309492451264d79fc29a0870b"} Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.763312 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.764070 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.764096 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.764106 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.765650 4687 generic.go:334] "Generic (PLEG): container 
finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="9f90f83416838f8073253a8a9f36244bc6810976a24dd836a7e4a1f1d685e60d" exitCode=0 Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.765698 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"9f90f83416838f8073253a8a9f36244bc6810976a24dd836a7e4a1f1d685e60d"} Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.765720 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.767455 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.767543 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:27 crc kubenswrapper[4687]: I1125 09:03:27.767569 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.056282 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.677031 4687 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.246:6443: connect: connection refused Nov 25 09:03:28 crc kubenswrapper[4687]: E1125 09:03:28.687852 4687 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.246:6443: connect: connection refused" interval="3.2s" Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.771469 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"9ecdc1a162e491f6c98ecee45e49897884410f336f2735113e00d110f86e0e4a"} Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.771529 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"5f9867bd6213ab7153f99348331e4fedb1cac3236436c454ab2974ee3fdb1d9b"} Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.771543 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"fed588c0fe3b1c0c10c1ff1a154c1ae83a483da23e9b38c0b919a2b293a76b21"} Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.771575 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.772884 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.772920 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.772931 4687 
Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.775863 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"58b0936b99680485c59502e5c0a76d33bc48294cfd52e3c913a0120b62a856a1"}
Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.775920 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.776925 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.776953 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.776966 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.779514 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce"}
Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.779549 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0"}
Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.779563 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48"}
Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.779575 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f"}
Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.781389 4687 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="cbb106924fdb53a6998ffb913dafedfe33521fe69f4b401022f0c58f979691ec" exitCode=0
Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.781434 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"cbb106924fdb53a6998ffb913dafedfe33521fe69f4b401022f0c58f979691ec"}
Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.781526 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.781539 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.782462 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.782514 4687 kubelet_node_status.go:724] "Recording event message for
node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.782541 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.782554 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.782496 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.782613 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.941123 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.942267 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.942320 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.942333 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:28 crc kubenswrapper[4687]: I1125 09:03:28.942362 4687 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:03:28 crc kubenswrapper[4687]: E1125 09:03:28.942955 4687 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.246:6443: connect: connection refused" node="crc" Nov 25 09:03:29 crc kubenswrapper[4687]: W1125 09:03:29.073151 4687 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.246:6443: connect: connection refused Nov 25 09:03:29 crc kubenswrapper[4687]: E1125 09:03:29.073240 4687 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.246:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:03:29 crc kubenswrapper[4687]: W1125 09:03:29.167452 4687 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.246:6443: connect: connection refused Nov 25 09:03:29 crc kubenswrapper[4687]: E1125 09:03:29.167605 4687 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.246:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:03:29 crc kubenswrapper[4687]: W1125 09:03:29.204949 4687 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get 
"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.246:6443: connect: connection refused Nov 25 09:03:29 crc kubenswrapper[4687]: E1125 09:03:29.205037 4687 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.246:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.789933 4687 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="fc9e1af593911bf14a2c855a6733b4bb4d22d52e257560aed8b84f66818e95ab" exitCode=0 Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.790000 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"fc9e1af593911bf14a2c855a6733b4bb4d22d52e257560aed8b84f66818e95ab"} Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.790173 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.795388 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.795439 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.795456 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.799739 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.799922 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"650a8fd9996b0b1d824678857b7f6a2fad54ff8816e5a9145696a7e7428de790"} Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.800052 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.800050 4687 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.800199 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.800321 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.800760 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.800800 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.800813 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.801107 4687 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.801138 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.801152 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.801696 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.801726 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.801738 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.801770 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.801811 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:29 crc kubenswrapper[4687]: I1125 09:03:29.801832 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:30 crc kubenswrapper[4687]: I1125 09:03:30.097254 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:03:30 crc kubenswrapper[4687]: I1125 09:03:30.805873 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:30 crc kubenswrapper[4687]: I1125 09:03:30.806356 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f3ceff46ab6cff536f160ccbc3050e88810cf658c0dd96fc8fcd3be619a87ada"} Nov 25 09:03:30 crc kubenswrapper[4687]: I1125 09:03:30.806399 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ee0d0493d08d4eaf2f9d0e476b45c160c958469c183757219b787f09ab152cca"} Nov 25 09:03:30 crc kubenswrapper[4687]: I1125 09:03:30.806418 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"8b815780087126b2c8d8b8ec45b673e753d8b1563a96e0dd9381cffa9e49f331"} Nov 25 09:03:30 crc kubenswrapper[4687]: I1125 09:03:30.806486 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:03:30 crc kubenswrapper[4687]: I1125 09:03:30.807227 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:30 crc kubenswrapper[4687]: I1125 09:03:30.807254 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:30 crc kubenswrapper[4687]: I1125 09:03:30.807267 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:30 crc kubenswrapper[4687]: I1125 09:03:30.937316 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:03:30 crc kubenswrapper[4687]: I1125 09:03:30.937541 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:30 crc kubenswrapper[4687]: I1125 09:03:30.938876 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:30 crc kubenswrapper[4687]: I1125 09:03:30.938916 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:30 crc kubenswrapper[4687]: I1125 09:03:30.938926 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:30 crc kubenswrapper[4687]: I1125 09:03:30.945940 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:03:31 crc kubenswrapper[4687]: I1125 09:03:31.510879 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:03:31 crc kubenswrapper[4687]: I1125 09:03:31.511034 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:31 crc kubenswrapper[4687]: I1125 09:03:31.512218 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:31 crc kubenswrapper[4687]: I1125 09:03:31.512258 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:31 crc kubenswrapper[4687]: I1125 09:03:31.512266 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:31 crc kubenswrapper[4687]: I1125 09:03:31.813246 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ef8debcf38910327db36439311e2eb60d65f8635a265251e0000868ca0ac08a2"} Nov 25 09:03:31 crc kubenswrapper[4687]: I1125 09:03:31.813297 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"9ddb66748f9c9e61538d9b8fa023f3842558557989f7c54e5a713d1a66407165"} Nov 25 09:03:31 crc kubenswrapper[4687]: I1125 09:03:31.813315 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:31 crc kubenswrapper[4687]: I1125 09:03:31.813396 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:31 crc kubenswrapper[4687]: I1125 09:03:31.813410 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:31 crc kubenswrapper[4687]: I1125 09:03:31.814443 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:31 crc kubenswrapper[4687]: I1125 09:03:31.814480 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:31 crc kubenswrapper[4687]: I1125 09:03:31.814491 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:31 crc kubenswrapper[4687]: I1125 09:03:31.814577 4687 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:31 crc kubenswrapper[4687]: I1125 09:03:31.814606 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:31 crc kubenswrapper[4687]: I1125 09:03:31.814617 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:31 crc kubenswrapper[4687]: I1125 09:03:31.814619 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:31 crc kubenswrapper[4687]: I1125 09:03:31.814644 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:31 crc kubenswrapper[4687]: I1125 09:03:31.814656 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:32 crc kubenswrapper[4687]: I1125 09:03:32.143815 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:32 crc kubenswrapper[4687]: I1125 09:03:32.145069 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:32 crc kubenswrapper[4687]: I1125 09:03:32.145129 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:32 crc kubenswrapper[4687]: I1125 09:03:32.145149 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:32 crc kubenswrapper[4687]: I1125 09:03:32.145181 4687 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:03:32 crc kubenswrapper[4687]: I1125 09:03:32.552656 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:03:32 crc kubenswrapper[4687]: I1125 09:03:32.778345 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:03:32 crc kubenswrapper[4687]: I1125 09:03:32.815759 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:32 crc kubenswrapper[4687]: I1125 09:03:32.815758 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:32 crc kubenswrapper[4687]: I1125 09:03:32.815714 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:32 crc kubenswrapper[4687]: I1125 09:03:32.817099 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:32 crc kubenswrapper[4687]: I1125 09:03:32.817137 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:32 crc kubenswrapper[4687]: I1125 09:03:32.817149 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:32 crc kubenswrapper[4687]: I1125 09:03:32.817168 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:32 crc kubenswrapper[4687]: I1125 09:03:32.817240 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:32 crc kubenswrapper[4687]: I1125 09:03:32.817251 4687 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:03:32 crc kubenswrapper[4687]: I1125 09:03:32.817188 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:03:32 crc kubenswrapper[4687]: I1125 09:03:32.817311 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:03:32 crc kubenswrapper[4687]: I1125 09:03:32.817321 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:03:33 crc kubenswrapper[4687]: I1125 09:03:33.626864 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 09:03:33 crc kubenswrapper[4687]: I1125 09:03:33.820663 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:03:33 crc kubenswrapper[4687]: I1125 09:03:33.822161 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:03:33 crc kubenswrapper[4687]: I1125 09:03:33.822217 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:03:33 crc kubenswrapper[4687]: I1125 09:03:33.822242 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:03:35 crc kubenswrapper[4687]: I1125 09:03:35.455025 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc"
Nov 25 09:03:35 crc kubenswrapper[4687]: I1125 09:03:35.455265 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:03:35 crc kubenswrapper[4687]: I1125 09:03:35.457163 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:03:35 crc kubenswrapper[4687]: I1125 09:03:35.457202 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:03:35 crc kubenswrapper[4687]: I1125 09:03:35.457212 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:03:35 crc kubenswrapper[4687]: E1125 09:03:35.835436 4687 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 25 09:03:36 crc kubenswrapper[4687]: I1125 09:03:36.627747 4687 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 25 09:03:36 crc kubenswrapper[4687]: I1125 09:03:36.627835 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 09:03:39 crc kubenswrapper[4687]: I1125 09:03:39.432256 4687 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
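Three distinct probe failure modes show up around here: a context-deadline timeout from cluster-policy-controller above, a TCP connection refused from kube-apiserver-check-endpoints, and, just below, HTTP 403s because the probe hits /livez unauthenticated and the apiserver rejects system:anonymous, presumably until the default RBAC rules for those paths are in place. A kubelet-style HTTPS probe reduces to roughly this Go sketch (no client credentials, certificate unverified, short deadline):

package main

import (
	"crypto/tls"
	"errors"
	"fmt"
	"net"
	"net/http"
	"time"
)

// probe performs one HTTP-probe-style GET. The three outcomes in the
// log map to: deadline exceeded, connect error, and a non-2xx status
// such as the 403 on /livez. Illustrative sketch only.
func probe(url string) string {
	client := &http.Client{
		Timeout: 1 * time.Second,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get(url)
	if err != nil {
		var nerr net.Error
		if errors.As(err, &nerr) && nerr.Timeout() {
			return "failure: context deadline exceeded"
		}
		return "failure: " + err.Error()
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		return "success"
	}
	return fmt.Sprintf("failure: HTTP probe failed with statuscode: %d", resp.StatusCode)
}

func main() {
	fmt.Println(probe("https://192.168.126.11:10357/healthz"))
}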
Nov 25 09:03:39 crc kubenswrapper[4687]: I1125 09:03:39.432535 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Nov 25 09:03:39 crc kubenswrapper[4687]: I1125 09:03:39.498324 4687 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 25 09:03:39 crc kubenswrapper[4687]: I1125 09:03:39.498403 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 25 09:03:39 crc kubenswrapper[4687]: I1125 09:03:39.505783 4687 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 25 09:03:39 crc kubenswrapper[4687]: I1125 09:03:39.505849 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 25 09:03:39 crc kubenswrapper[4687]: I1125 09:03:39.837076 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 25 09:03:39 crc kubenswrapper[4687]: I1125 09:03:39.839554 4687 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="650a8fd9996b0b1d824678857b7f6a2fad54ff8816e5a9145696a7e7428de790" exitCode=255
Nov 25 09:03:39 crc kubenswrapper[4687]: I1125 09:03:39.839594 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"650a8fd9996b0b1d824678857b7f6a2fad54ff8816e5a9145696a7e7428de790"}
Nov 25 09:03:39 crc kubenswrapper[4687]: I1125 09:03:39.839723 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:03:39 crc kubenswrapper[4687]: I1125 09:03:39.840545 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:03:39 crc kubenswrapper[4687]: I1125 09:03:39.840573 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:03:39 crc kubenswrapper[4687]: I1125 09:03:39.840585 4687 kubelet_node_status.go:724] "Recording event message for
node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:39 crc kubenswrapper[4687]: I1125 09:03:39.840999 4687 scope.go:117] "RemoveContainer" containerID="650a8fd9996b0b1d824678857b7f6a2fad54ff8816e5a9145696a7e7428de790" Nov 25 09:03:40 crc kubenswrapper[4687]: I1125 09:03:40.670420 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 25 09:03:40 crc kubenswrapper[4687]: I1125 09:03:40.670632 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:40 crc kubenswrapper[4687]: I1125 09:03:40.671658 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:40 crc kubenswrapper[4687]: I1125 09:03:40.671703 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:40 crc kubenswrapper[4687]: I1125 09:03:40.671719 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:40 crc kubenswrapper[4687]: I1125 09:03:40.745891 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 25 09:03:40 crc kubenswrapper[4687]: I1125 09:03:40.844277 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 25 09:03:40 crc kubenswrapper[4687]: I1125 09:03:40.846659 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:40 crc kubenswrapper[4687]: I1125 09:03:40.847720 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1"} Nov 25 09:03:40 crc kubenswrapper[4687]: I1125 09:03:40.847931 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:40 crc kubenswrapper[4687]: I1125 09:03:40.848168 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:40 crc kubenswrapper[4687]: I1125 09:03:40.848208 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:40 crc kubenswrapper[4687]: I1125 09:03:40.848218 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:40 crc kubenswrapper[4687]: I1125 09:03:40.848917 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:40 crc kubenswrapper[4687]: I1125 09:03:40.848957 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:40 crc kubenswrapper[4687]: I1125 09:03:40.848968 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:40 crc kubenswrapper[4687]: I1125 09:03:40.869771 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 25 09:03:41 crc kubenswrapper[4687]: I1125 09:03:41.849401 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:41 crc kubenswrapper[4687]: I1125 
09:03:41.850783 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:41 crc kubenswrapper[4687]: I1125 09:03:41.850826 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:41 crc kubenswrapper[4687]: I1125 09:03:41.850839 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:42 crc kubenswrapper[4687]: I1125 09:03:42.558237 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:03:42 crc kubenswrapper[4687]: I1125 09:03:42.558495 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:42 crc kubenswrapper[4687]: I1125 09:03:42.558556 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:03:42 crc kubenswrapper[4687]: I1125 09:03:42.560056 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:42 crc kubenswrapper[4687]: I1125 09:03:42.560100 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:42 crc kubenswrapper[4687]: I1125 09:03:42.560114 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:42 crc kubenswrapper[4687]: I1125 09:03:42.562184 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:03:42 crc kubenswrapper[4687]: I1125 09:03:42.785194 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:03:42 crc kubenswrapper[4687]: I1125 09:03:42.785372 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:42 crc kubenswrapper[4687]: I1125 09:03:42.786391 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:42 crc kubenswrapper[4687]: I1125 09:03:42.786440 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:42 crc kubenswrapper[4687]: I1125 09:03:42.786457 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:42 crc kubenswrapper[4687]: I1125 09:03:42.851143 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:42 crc kubenswrapper[4687]: I1125 09:03:42.852031 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:42 crc kubenswrapper[4687]: I1125 09:03:42.852104 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:42 crc kubenswrapper[4687]: I1125 09:03:42.852205 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:43 crc kubenswrapper[4687]: I1125 09:03:43.853025 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:43 crc kubenswrapper[4687]: I1125 09:03:43.853976 4687 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:43 crc kubenswrapper[4687]: I1125 09:03:43.854007 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:43 crc kubenswrapper[4687]: I1125 09:03:43.854017 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:44 crc kubenswrapper[4687]: E1125 09:03:44.502839 4687 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.504737 4687 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.506415 4687 trace.go:236] Trace[1916714749]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 09:03:29.590) (total time: 14915ms): Nov 25 09:03:44 crc kubenswrapper[4687]: Trace[1916714749]: ---"Objects listed" error: 14915ms (09:03:44.506) Nov 25 09:03:44 crc kubenswrapper[4687]: Trace[1916714749]: [14.915936505s] [14.915936505s] END Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.506443 4687 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 25 09:03:44 crc kubenswrapper[4687]: E1125 09:03:44.506965 4687 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.507485 4687 trace.go:236] Trace[711581045]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 09:03:34.059) (total time: 10448ms): Nov 25 09:03:44 crc kubenswrapper[4687]: Trace[711581045]: ---"Objects listed" error: 10448ms (09:03:44.507) Nov 25 09:03:44 crc kubenswrapper[4687]: Trace[711581045]: [10.448369498s] [10.448369498s] END Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.507530 4687 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.507554 4687 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.509535 4687 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.673537 4687 apiserver.go:52] "Watching apiserver" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.677332 4687 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.677819 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h"] Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.678173 4687 util.go:30] "No sandbox for pod can be found. 
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.678256 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.678268 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.678259 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.678327 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 25 09:03:44 crc kubenswrapper[4687]: E1125 09:03:44.678368 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:03:44 crc kubenswrapper[4687]: E1125 09:03:44.678700 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.678706 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:03:44 crc kubenswrapper[4687]: E1125 09:03:44.678767 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.681623 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.682055 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.682131 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.682221 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.682227 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.682333 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.682414 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.683556 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.683464 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.685706 4687 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.708631 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.708685 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.708710 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.708730 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.708749 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.708767 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.708786 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.708804 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.708833 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.708852 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.708870 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.708887 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.708903 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.708921 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.708937 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.708955 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.708971 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.708989 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709007 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709024 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709042 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709059 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709075 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709076 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709090 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709135 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709159 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709180 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709201 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709223 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709245 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709267 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709285 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709306 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: 
\"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709325 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709346 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709400 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709420 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709439 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709458 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709477 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709497 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709534 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709553 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod 
\"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709571 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709591 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709651 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709717 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709732 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709741 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709760 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709779 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709797 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709817 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709836 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709854 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709875 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709893 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709910 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709927 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709945 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709963 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709981 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.709998 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710020 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710041 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710058 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710077 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710099 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710118 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710137 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710159 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710174 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710183 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710204 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710230 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710248 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710267 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 
09:03:44.710285 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710303 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710332 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710349 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710370 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710388 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710401 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710408 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710427 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710447 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710468 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710532 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710550 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710569 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710589 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710611 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710628 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") 
pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710633 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710645 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710662 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710680 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710748 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710771 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710790 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710809 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710831 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710850 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710871 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710890 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710908 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710927 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710947 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710964 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710968 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.710984 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711002 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711021 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711043 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711064 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711080 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711097 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711116 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711133 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711150 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 09:03:44 crc 
kubenswrapper[4687]: I1125 09:03:44.711168 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711185 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711203 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711225 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711247 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711314 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711336 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711357 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711376 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711397 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711420 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711439 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711457 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711475 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711495 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711530 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711550 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711569 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711578 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711587 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711606 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711626 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711646 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711677 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711696 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: 
\"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711717 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711737 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711757 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711779 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711801 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711827 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711848 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711874 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711895 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711917 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711936 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711957 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711978 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.711998 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712020 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712000 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712041 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712095 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712135 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712167 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712197 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712219 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712225 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712405 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712466 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712492 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712528 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712538 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712551 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712600 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712627 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712650 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712648 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712670 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712691 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712710 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712733 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712742 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712753 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712772 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712794 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712811 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712828 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712846 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712865 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712881 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712898 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712915 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: 
\"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712939 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712940 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712951 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712957 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712977 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.712995 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713014 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713034 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713061 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713093 4687 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713120 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713145 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713170 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713185 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713195 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713190 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713244 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713271 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713321 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713370 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713403 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713437 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713465 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713494 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713539 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713567 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713595 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713621 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713651 4687 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713677 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713705 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713730 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713765 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713816 4687 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713833 4687 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713849 4687 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713864 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713879 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713891 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: 
\"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713902 4687 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713914 4687 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713922 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713927 4687 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713962 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713974 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713984 4687 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.713993 4687 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.714046 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.714169 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.714174 4687 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.714221 4687 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.714238 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.714252 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.714486 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.714514 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.714592 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.714787 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.714785 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.715184 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.715225 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.715228 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.715270 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.715306 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.715529 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.715494 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.715715 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). 
InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.715932 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.716041 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.716082 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.716092 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.716160 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.716443 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.716610 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.716624 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.716824 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.716963 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.717210 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.717384 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.717586 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.717784 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.717833 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.718131 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.718749 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.718896 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.718949 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.719044 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.719240 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.719402 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.719901 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.720182 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.720769 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.720829 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.720880 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.720954 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.721180 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.721298 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.722253 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.722662 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.722693 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.722665 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.722759 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.722771 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.722755 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.722798 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.723391 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.723474 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.723517 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.723535 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.723733 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.723842 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.723870 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: E1125 09:03:44.724058 4687 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.724089 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.724091 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.723969 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: E1125 09:03:44.724178 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:03:45.224131897 +0000 UTC m=+20.277771825 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.724197 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: E1125 09:03:44.724304 4687 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:03:44 crc kubenswrapper[4687]: E1125 09:03:44.724354 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:03:45.224344742 +0000 UTC m=+20.277984650 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.724457 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.724483 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.724595 4687 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.724601 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.724073 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.724804 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.724829 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.724959 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.725020 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.725083 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.725124 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.725310 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.725330 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.725312 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.725418 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). 
InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.725624 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.725689 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.725632 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.725680 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.725936 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.726100 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.726103 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.726118 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). 
InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: E1125 09:03:44.726158 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:03:45.226128738 +0000 UTC m=+20.279768656 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.726177 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.726602 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.726663 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.726780 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.726786 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.726926 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.726963 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.727050 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.727250 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.727330 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.727266 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.727647 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.727746 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.727732 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.727879 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.728121 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.728136 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.728433 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.728522 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.728786 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). 
InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.729106 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.729379 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.729646 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.729875 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.729967 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.727386 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.730320 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.730661 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.730747 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.730938 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.731372 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.731433 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.731438 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.731689 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.731783 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.731894 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.731903 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.731912 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.731965 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.732287 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.732327 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.732365 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.732451 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.732682 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.732743 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.732962 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.733020 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.733293 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.733754 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.733984 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.734291 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.734610 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.734953 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.735186 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.735784 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.735858 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.736342 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.736389 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.736749 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.736755 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.737056 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.737155 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.737671 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.737926 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.738435 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.738772 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.739838 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.739905 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.739923 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.740153 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.740222 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.740269 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.740646 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.741841 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.742105 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.742162 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.742458 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: E1125 09:03:44.742777 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 09:03:44 crc kubenswrapper[4687]: E1125 09:03:44.742849 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 09:03:44 crc kubenswrapper[4687]: E1125 09:03:44.742868 4687 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.742927 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: E1125 09:03:44.743028 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:03:45.243004163 +0000 UTC m=+20.296644081 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.743320 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.743523 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.744118 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.744132 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: E1125 09:03:44.744210 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 09:03:44 crc kubenswrapper[4687]: E1125 09:03:44.744265 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 09:03:44 crc kubenswrapper[4687]: E1125 09:03:44.744283 4687 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:03:44 crc kubenswrapper[4687]: E1125 09:03:44.744377 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:03:45.244355918 +0000 UTC m=+20.297995706 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.745049 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.746931 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.747218 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.747638 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.750875 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.753150 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.753628 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.753972 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.755151 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.755543 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.755703 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.764312 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.764484 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.775420 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.782918 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.785581 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.796369 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.796608 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.803485 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.805490 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.805647 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.806142 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.813836 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.814930 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815588 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815658 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815732 4687 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815744 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815753 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815785 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815795 4687 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815803 4687 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815812 4687 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815836 4687 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815860 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815869 4687 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815877 4687 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815887 4687 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815896 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815904 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815915 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815927 4687 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815939 4687 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815950 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815959 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815968 4687 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815977 4687 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815985 4687 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.815994 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816003 4687 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816011 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816026 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816037 4687 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816047 4687 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816102 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816123 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816179 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816193 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816209 4687 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816219 4687 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816230 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816240 4687 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816249 4687 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816260 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816460 4687 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816470 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816481 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816493 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816528 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816540 4687 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816550 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816561 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816572 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816584 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816594 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816603 4687 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816613 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816624 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816635 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816644 4687 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816657 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816674 4687 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816685 4687 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816696 4687 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816706 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816716 4687 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816726 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816736 4687 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816746 4687 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816755 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816765 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816775 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816786 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816798 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816807 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816819 4687 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816831 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816843 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816856 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816868 4687 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816881 4687 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816890 4687 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816900 4687 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816910 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816920 4687 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816931 4687 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816944 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816953 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816962 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816973 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816983 4687 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.816996 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817010 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817021 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817030 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817039 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817049 4687 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817057 4687 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817066 4687 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817075 4687 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817084 4687 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817094 4687 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817103 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817112 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817121 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817130 4687 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817139 4687 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817148 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817160 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817169 4687 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817178 4687 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817186 4687 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817195 4687 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817204 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817215 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817224 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817235 4687 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817246 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817255 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817266 4687 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817278 4687 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817291 4687 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817304 4687 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817318 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817330 4687 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817342 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817353 4687 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817361 4687 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817370 4687 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName:
\"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817380 4687 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817389 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817399 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817411 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817421 4687 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817431 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817440 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817449 4687 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817458 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817467 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817476 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817486 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817495 4687 reconciler_common.go:293] "Volume 
detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817530 4687 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817539 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817548 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817557 4687 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817566 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817575 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817584 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817593 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817603 4687 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817615 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817627 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817639 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817652 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: 
\"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817663 4687 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817673 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817682 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817691 4687 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817701 4687 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817710 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817720 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817730 4687 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817740 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817750 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817759 4687 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817769 4687 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817778 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817787 4687 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817798 4687 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817808 4687 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817817 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817827 4687 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817836 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817845 4687 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817854 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817863 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817873 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817883 4687 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817892 4687 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817902 4687 reconciler_common.go:293] "Volume 
detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817911 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.817921 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 25 09:03:44 crc kubenswrapper[4687]: I1125 09:03:44.822115 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.000866 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.006214 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.024634 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:03:45 crc kubenswrapper[4687]: W1125 09:03:45.025909 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-c414b307f4b1e05d760b311e2d4a7883a588981a91f547d8f5e70fa96ab01609 WatchSource:0}: Error finding container c414b307f4b1e05d760b311e2d4a7883a588981a91f547d8f5e70fa96ab01609: Status 404 returned error can't find the container with id c414b307f4b1e05d760b311e2d4a7883a588981a91f547d8f5e70fa96ab01609 Nov 25 09:03:45 crc kubenswrapper[4687]: W1125 09:03:45.027647 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-bcf41b66a754601ac39d270c50f2d31a0a8b29e4bb227011bcafaade8140d2d1 WatchSource:0}: Error finding container bcf41b66a754601ac39d270c50f2d31a0a8b29e4bb227011bcafaade8140d2d1: Status 404 returned error can't find the container with id bcf41b66a754601ac39d270c50f2d31a0a8b29e4bb227011bcafaade8140d2d1 Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.322394 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:03:45 crc kubenswrapper[4687]: E1125 09:03:45.322738 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:03:46.32269182 +0000 UTC m=+21.376331548 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.323020 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.323066 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.323121 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.323147 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:03:45 crc kubenswrapper[4687]: E1125 09:03:45.323213 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:03:45 crc kubenswrapper[4687]: E1125 09:03:45.323251 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:03:45 crc kubenswrapper[4687]: E1125 09:03:45.323261 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:03:45 crc kubenswrapper[4687]: E1125 09:03:45.323294 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:03:45 crc kubenswrapper[4687]: E1125 09:03:45.323308 4687 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:03:45 crc 
kubenswrapper[4687]: E1125 09:03:45.323366 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:03:46.323353988 +0000 UTC m=+21.376993856 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:03:45 crc kubenswrapper[4687]: E1125 09:03:45.323365 4687 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:03:45 crc kubenswrapper[4687]: E1125 09:03:45.323269 4687 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:03:45 crc kubenswrapper[4687]: E1125 09:03:45.323462 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:03:46.32344273 +0000 UTC m=+21.377082618 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:03:45 crc kubenswrapper[4687]: E1125 09:03:45.323494 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:03:46.323478541 +0000 UTC m=+21.377118449 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:03:45 crc kubenswrapper[4687]: E1125 09:03:45.323270 4687 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:03:45 crc kubenswrapper[4687]: E1125 09:03:45.323746 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:03:46.323703646 +0000 UTC m=+21.377343364 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.399959 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.405361 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.412746 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.414076 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.423242 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.434836 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.448292 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.458464 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.468001 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.478855 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-po
d-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.489028 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.500324 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.511110 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.521741 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.533877 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.569001 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.746924 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.747901 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.749672 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.751222 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.753121 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.753928 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.754811 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.756342 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.757342 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.758721 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 25 09:03:45 crc 
kubenswrapper[4687]: I1125 09:03:45.759415 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.761048 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.761921 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.762719 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.764325 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.765306 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.767024 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.767721 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.768850 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.770498 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.771052 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.772148 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.772224 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.772670 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.773708 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.774118 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.774811 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.775896 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.776343 4687 kubelet_volumes.go:163] "Cleaned up orphaned 
pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.777304 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.778024 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.779012 4687 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.779119 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.781083 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.782228 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.782697 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.785084 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.787240 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.788641 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.790036 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.790843 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.791792 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.792427 4687 status_manager.go:875] "Failed to update status 
for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.792465 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.793540 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.794858 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.795563 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.796641 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.797259 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.798677 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.799218 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.800176 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.801980 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.802840 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.803456 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.804543 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.807384 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod 
was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.823963 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"n
ame\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.840582 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.859580 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a"} Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.859635 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1"} Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.859649 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"87f783272af4e092035d275dbaa3945edb56ffbff8fbc09990ef2a65c052501c"} Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.861399 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"bcf41b66a754601ac39d270c50f2d31a0a8b29e4bb227011bcafaade8140d2d1"} Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.863742 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890"} Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.863771 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" 
event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"c414b307f4b1e05d760b311e2d4a7883a588981a91f547d8f5e70fa96ab01609"} Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.866085 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:45 crc kubenswrapper[4687]: E1125 09:03:45.881997 4687 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.882806 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.899949 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"starte
d\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.912325 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.926063 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.937584 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.950943 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.964185 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:45 crc kubenswrapper[4687]: I1125 09:03:45.979626 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.331171 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.331243 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:03:46 crc kubenswrapper[4687]: E1125 09:03:46.331282 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:03:48.331259069 +0000 UTC m=+23.384898787 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.331321 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.331350 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.331382 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:03:46 crc kubenswrapper[4687]: E1125 09:03:46.331692 4687 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:03:46 crc kubenswrapper[4687]: E1125 09:03:46.331710 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:03:46 crc kubenswrapper[4687]: E1125 09:03:46.331728 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:03:46 crc kubenswrapper[4687]: E1125 09:03:46.331718 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:03:46 crc kubenswrapper[4687]: E1125 09:03:46.331781 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:03:46 crc kubenswrapper[4687]: E1125 09:03:46.331800 4687 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:03:46 crc kubenswrapper[4687]: E1125 09:03:46.331739 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert 
podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:03:48.331725101 +0000 UTC m=+23.385364819 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:03:46 crc kubenswrapper[4687]: E1125 09:03:46.331909 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:03:48.331882005 +0000 UTC m=+23.385521713 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:03:46 crc kubenswrapper[4687]: E1125 09:03:46.331730 4687 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:03:46 crc kubenswrapper[4687]: E1125 09:03:46.331961 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:03:48.331952106 +0000 UTC m=+23.385591824 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:03:46 crc kubenswrapper[4687]: E1125 09:03:46.331741 4687 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:03:46 crc kubenswrapper[4687]: E1125 09:03:46.332001 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:03:48.331994737 +0000 UTC m=+23.385634455 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.734223 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:03:46 crc kubenswrapper[4687]: E1125 09:03:46.734393 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.734853 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:03:46 crc kubenswrapper[4687]: E1125 09:03:46.734995 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.734872 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:03:46 crc kubenswrapper[4687]: E1125 09:03:46.735077 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.867369 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.868027 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.869995 4687 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1" exitCode=255 Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.870352 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1"} Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.870455 4687 scope.go:117] "RemoveContainer" containerID="650a8fd9996b0b1d824678857b7f6a2fad54ff8816e5a9145696a7e7428de790" Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.885253 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.885491 4687 scope.go:117] "RemoveContainer" containerID="9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1" Nov 25 09:03:46 crc kubenswrapper[4687]: E1125 09:03:46.886065 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.897255 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.912068 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.924935 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.940119 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.974184 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.987731 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:46 crc kubenswrapper[4687]: I1125 09:03:46.999781 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:47 crc kubenswrapper[4687]: I1125 09:03:47.874659 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 25 09:03:47 crc kubenswrapper[4687]: I1125 09:03:47.877452 4687 scope.go:117] "RemoveContainer" containerID="9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1" Nov 25 09:03:47 crc kubenswrapper[4687]: E1125 09:03:47.877643 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 25 09:03:47 crc kubenswrapper[4687]: I1125 09:03:47.878547 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909"} Nov 25 09:03:47 crc kubenswrapper[4687]: I1125 09:03:47.892992 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:47 crc kubenswrapper[4687]: I1125 09:03:47.907815 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba
8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:47 crc kubenswrapper[4687]: I1125 09:03:47.925129 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:47 crc kubenswrapper[4687]: I1125 09:03:47.940384 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:47 crc kubenswrapper[4687]: I1125 09:03:47.954546 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:47 crc kubenswrapper[4687]: I1125 09:03:47.968748 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:47 crc kubenswrapper[4687]: I1125 09:03:47.984203 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:48 crc kubenswrapper[4687]: I1125 09:03:48.001205 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:48 crc kubenswrapper[4687]: I1125 09:03:48.021261 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:48 crc kubenswrapper[4687]: I1125 09:03:48.038654 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:48 crc kubenswrapper[4687]: I1125 09:03:48.052129 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:48 crc kubenswrapper[4687]: I1125 09:03:48.065940 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:48 crc kubenswrapper[4687]: I1125 09:03:48.078297 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:48 crc kubenswrapper[4687]: I1125 09:03:48.091935 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:48 crc kubenswrapper[4687]: I1125 09:03:48.102738 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:48 crc kubenswrapper[4687]: I1125 09:03:48.119210 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:48 crc kubenswrapper[4687]: I1125 09:03:48.348691 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:03:48 crc kubenswrapper[4687]: I1125 09:03:48.348823 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:03:48 crc kubenswrapper[4687]: E1125 09:03:48.348869 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:03:52.348842817 +0000 UTC m=+27.402482545 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:03:48 crc kubenswrapper[4687]: I1125 09:03:48.348934 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:03:48 crc kubenswrapper[4687]: E1125 09:03:48.348985 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:03:48 crc kubenswrapper[4687]: E1125 09:03:48.349007 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:03:48 crc kubenswrapper[4687]: E1125 09:03:48.349023 4687 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:03:48 crc kubenswrapper[4687]: E1125 09:03:48.349037 4687 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:03:48 crc kubenswrapper[4687]: I1125 09:03:48.348984 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:03:48 crc kubenswrapper[4687]: E1125 09:03:48.349076 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:03:52.349062042 +0000 UTC m=+27.402701780 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:03:48 crc kubenswrapper[4687]: E1125 09:03:48.349098 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-11-25 09:03:52.349088433 +0000 UTC m=+27.402728161 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:03:48 crc kubenswrapper[4687]: I1125 09:03:48.349121 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:03:48 crc kubenswrapper[4687]: E1125 09:03:48.349183 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:03:48 crc kubenswrapper[4687]: E1125 09:03:48.349218 4687 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:03:48 crc kubenswrapper[4687]: E1125 09:03:48.349223 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:03:48 crc kubenswrapper[4687]: E1125 09:03:48.349246 4687 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:03:48 crc kubenswrapper[4687]: E1125 09:03:48.349261 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:03:52.349249997 +0000 UTC m=+27.402889725 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:03:48 crc kubenswrapper[4687]: E1125 09:03:48.349306 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:03:52.349287188 +0000 UTC m=+27.402926986 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:03:48 crc kubenswrapper[4687]: I1125 09:03:48.734665 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:03:48 crc kubenswrapper[4687]: I1125 09:03:48.734695 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:03:48 crc kubenswrapper[4687]: I1125 09:03:48.734777 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:03:48 crc kubenswrapper[4687]: E1125 09:03:48.734808 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:03:48 crc kubenswrapper[4687]: E1125 09:03:48.734846 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:03:48 crc kubenswrapper[4687]: E1125 09:03:48.734902 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.345744 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-vcqct"] Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.346072 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.347444 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-g8h9r"] Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.351164 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-wlzrb"] Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.351249 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.351342 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.351492 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-9zmf6"] Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.351743 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.351803 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.351823 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.351831 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.352211 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.352325 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-9zmf6" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.354157 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.354284 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.354458 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.354609 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.354742 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.354795 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.355355 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.355400 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.357039 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.357567 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.366707 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.394336 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.409456 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.434898 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.450432 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466115 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9736e597-ba61-47a5-b1e2-02b151c5cac0-system-cni-dir\") pod \"multus-additional-cni-plugins-g8h9r\" (UID: \"9736e597-ba61-47a5-b1e2-02b151c5cac0\") " pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466198 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-host-run-netns\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466227 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4r75\" (UniqueName: \"kubernetes.io/projected/37ae5d60-327b-4f2d-83c3-bd775960a7ad-kube-api-access-q4r75\") pod \"node-resolver-9zmf6\" (UID: \"37ae5d60-327b-4f2d-83c3-bd775960a7ad\") " pod="openshift-dns/node-resolver-9zmf6" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466245 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ac5dd7d0-d24d-411e-a7d0-3e921f218f4c-proxy-tls\") pod \"machine-config-daemon-vcqct\" (UID: \"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\") " pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466260 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-multus-cni-dir\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466288 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/37ae5d60-327b-4f2d-83c3-bd775960a7ad-hosts-file\") pod \"node-resolver-9zmf6\" (UID: \"37ae5d60-327b-4f2d-83c3-bd775960a7ad\") " pod="openshift-dns/node-resolver-9zmf6" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466304 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: 
\"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-os-release\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466319 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-host-run-k8s-cni-cncf-io\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466333 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/9736e597-ba61-47a5-b1e2-02b151c5cac0-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-g8h9r\" (UID: \"9736e597-ba61-47a5-b1e2-02b151c5cac0\") " pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466351 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ac5dd7d0-d24d-411e-a7d0-3e921f218f4c-mcd-auth-proxy-config\") pod \"machine-config-daemon-vcqct\" (UID: \"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\") " pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466377 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-multus-socket-dir-parent\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466395 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-host-var-lib-cni-multus\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466425 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-etc-kubernetes\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466451 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/9736e597-ba61-47a5-b1e2-02b151c5cac0-cnibin\") pod \"multus-additional-cni-plugins-g8h9r\" (UID: \"9736e597-ba61-47a5-b1e2-02b151c5cac0\") " pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466480 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-host-var-lib-cni-bin\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466524 4687 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-multus-conf-dir\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466543 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/9736e597-ba61-47a5-b1e2-02b151c5cac0-os-release\") pod \"multus-additional-cni-plugins-g8h9r\" (UID: \"9736e597-ba61-47a5-b1e2-02b151c5cac0\") " pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466566 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-cnibin\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466586 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-cni-binary-copy\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466604 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-multus-daemon-config\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466640 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/ac5dd7d0-d24d-411e-a7d0-3e921f218f4c-rootfs\") pod \"machine-config-daemon-vcqct\" (UID: \"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\") " pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466663 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-host-run-multus-certs\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466682 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ncmd\" (UniqueName: \"kubernetes.io/projected/9736e597-ba61-47a5-b1e2-02b151c5cac0-kube-api-access-8ncmd\") pod \"multus-additional-cni-plugins-g8h9r\" (UID: \"9736e597-ba61-47a5-b1e2-02b151c5cac0\") " pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466701 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-system-cni-dir\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc 
kubenswrapper[4687]: I1125 09:03:50.466715 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xlsnj\" (UniqueName: \"kubernetes.io/projected/ac5dd7d0-d24d-411e-a7d0-3e921f218f4c-kube-api-access-xlsnj\") pod \"machine-config-daemon-vcqct\" (UID: \"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\") " pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466730 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-hostroot\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466745 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/9736e597-ba61-47a5-b1e2-02b151c5cac0-tuning-conf-dir\") pod \"multus-additional-cni-plugins-g8h9r\" (UID: \"9736e597-ba61-47a5-b1e2-02b151c5cac0\") " pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466761 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cprtj\" (UniqueName: \"kubernetes.io/projected/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-kube-api-access-cprtj\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466776 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/9736e597-ba61-47a5-b1e2-02b151c5cac0-cni-binary-copy\") pod \"multus-additional-cni-plugins-g8h9r\" (UID: \"9736e597-ba61-47a5-b1e2-02b151c5cac0\") " pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.466792 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-host-var-lib-kubelet\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.467429 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.481755 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.495886 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.512161 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.528989 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.541909 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.554295 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.567263 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9736e597-ba61-47a5-b1e2-02b151c5cac0-system-cni-dir\") pod \"multus-additional-cni-plugins-g8h9r\" (UID: \"9736e597-ba61-47a5-b1e2-02b151c5cac0\") " pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.567311 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-host-run-netns\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.567415 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9736e597-ba61-47a5-b1e2-02b151c5cac0-system-cni-dir\") pod \"multus-additional-cni-plugins-g8h9r\" (UID: \"9736e597-ba61-47a5-b1e2-02b151c5cac0\") " pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.567604 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-host-run-netns\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.567688 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-multus-cni-dir\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.567721 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4r75\" (UniqueName: \"kubernetes.io/projected/37ae5d60-327b-4f2d-83c3-bd775960a7ad-kube-api-access-q4r75\") pod \"node-resolver-9zmf6\" (UID: \"37ae5d60-327b-4f2d-83c3-bd775960a7ad\") " pod="openshift-dns/node-resolver-9zmf6" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.567745 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ac5dd7d0-d24d-411e-a7d0-3e921f218f4c-proxy-tls\") pod \"machine-config-daemon-vcqct\" (UID: \"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\") " 
pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.567769 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-host-run-k8s-cni-cncf-io\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.567793 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/9736e597-ba61-47a5-b1e2-02b151c5cac0-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-g8h9r\" (UID: \"9736e597-ba61-47a5-b1e2-02b151c5cac0\") " pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.567828 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/37ae5d60-327b-4f2d-83c3-bd775960a7ad-hosts-file\") pod \"node-resolver-9zmf6\" (UID: \"37ae5d60-327b-4f2d-83c3-bd775960a7ad\") " pod="openshift-dns/node-resolver-9zmf6" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.567849 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-os-release\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.567871 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ac5dd7d0-d24d-411e-a7d0-3e921f218f4c-mcd-auth-proxy-config\") pod \"machine-config-daemon-vcqct\" (UID: \"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\") " pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.567892 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-etc-kubernetes\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.567885 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.567914 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/9736e597-ba61-47a5-b1e2-02b151c5cac0-cnibin\") pod \"multus-additional-cni-plugins-g8h9r\" (UID: \"9736e597-ba61-47a5-b1e2-02b151c5cac0\") " pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.567921 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-host-run-k8s-cni-cncf-io\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.567946 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-multus-socket-dir-parent\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.567994 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-multus-socket-dir-parent\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568017 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-host-var-lib-cni-multus\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568042 4687 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-multus-cni-dir\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568092 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-host-var-lib-cni-multus\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568045 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-host-var-lib-cni-bin\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568065 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-etc-kubernetes\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568174 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/9736e597-ba61-47a5-b1e2-02b151c5cac0-cnibin\") pod \"multus-additional-cni-plugins-g8h9r\" (UID: \"9736e597-ba61-47a5-b1e2-02b151c5cac0\") " pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568185 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/37ae5d60-327b-4f2d-83c3-bd775960a7ad-hosts-file\") pod \"node-resolver-9zmf6\" (UID: \"37ae5d60-327b-4f2d-83c3-bd775960a7ad\") " pod="openshift-dns/node-resolver-9zmf6" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568068 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-host-var-lib-cni-bin\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568177 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-multus-conf-dir\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568211 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-multus-conf-dir\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568229 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/9736e597-ba61-47a5-b1e2-02b151c5cac0-os-release\") pod \"multus-additional-cni-plugins-g8h9r\" (UID: 
\"9736e597-ba61-47a5-b1e2-02b151c5cac0\") " pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568249 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-multus-daemon-config\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568270 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-cnibin\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568286 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-cni-binary-copy\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568303 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/ac5dd7d0-d24d-411e-a7d0-3e921f218f4c-rootfs\") pod \"machine-config-daemon-vcqct\" (UID: \"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\") " pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568319 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-host-run-multus-certs\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568336 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ncmd\" (UniqueName: \"kubernetes.io/projected/9736e597-ba61-47a5-b1e2-02b151c5cac0-kube-api-access-8ncmd\") pod \"multus-additional-cni-plugins-g8h9r\" (UID: \"9736e597-ba61-47a5-b1e2-02b151c5cac0\") " pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568361 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-system-cni-dir\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568378 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/9736e597-ba61-47a5-b1e2-02b151c5cac0-tuning-conf-dir\") pod \"multus-additional-cni-plugins-g8h9r\" (UID: \"9736e597-ba61-47a5-b1e2-02b151c5cac0\") " pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568395 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xlsnj\" (UniqueName: \"kubernetes.io/projected/ac5dd7d0-d24d-411e-a7d0-3e921f218f4c-kube-api-access-xlsnj\") pod \"machine-config-daemon-vcqct\" (UID: \"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\") " 
pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568410 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-hostroot\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568427 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cprtj\" (UniqueName: \"kubernetes.io/projected/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-kube-api-access-cprtj\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568444 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/9736e597-ba61-47a5-b1e2-02b151c5cac0-cni-binary-copy\") pod \"multus-additional-cni-plugins-g8h9r\" (UID: \"9736e597-ba61-47a5-b1e2-02b151c5cac0\") " pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568459 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-host-var-lib-kubelet\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568522 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-host-var-lib-kubelet\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568625 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/9736e597-ba61-47a5-b1e2-02b151c5cac0-os-release\") pod \"multus-additional-cni-plugins-g8h9r\" (UID: \"9736e597-ba61-47a5-b1e2-02b151c5cac0\") " pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568665 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-system-cni-dir\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568666 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-hostroot\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568724 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-cnibin\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568780 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" 
(UniqueName: \"kubernetes.io/configmap/9736e597-ba61-47a5-b1e2-02b151c5cac0-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-g8h9r\" (UID: \"9736e597-ba61-47a5-b1e2-02b151c5cac0\") " pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568780 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ac5dd7d0-d24d-411e-a7d0-3e921f218f4c-mcd-auth-proxy-config\") pod \"machine-config-daemon-vcqct\" (UID: \"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\") " pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568851 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-host-run-multus-certs\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.568890 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/ac5dd7d0-d24d-411e-a7d0-3e921f218f4c-rootfs\") pod \"machine-config-daemon-vcqct\" (UID: \"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\") " pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.569147 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-multus-daemon-config\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.569210 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-os-release\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.569435 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/9736e597-ba61-47a5-b1e2-02b151c5cac0-cni-binary-copy\") pod \"multus-additional-cni-plugins-g8h9r\" (UID: \"9736e597-ba61-47a5-b1e2-02b151c5cac0\") " pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.569557 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-cni-binary-copy\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.571169 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/9736e597-ba61-47a5-b1e2-02b151c5cac0-tuning-conf-dir\") pod \"multus-additional-cni-plugins-g8h9r\" (UID: \"9736e597-ba61-47a5-b1e2-02b151c5cac0\") " pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.575085 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ac5dd7d0-d24d-411e-a7d0-3e921f218f4c-proxy-tls\") pod 
\"machine-config-daemon-vcqct\" (UID: \"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\") " pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.582148 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.583962 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlsnj\" (UniqueName: \"kubernetes.io/projected/ac5dd7d0-d24d-411e-a7d0-3e921f218f4c-kube-api-access-xlsnj\") pod \"machine-config-daemon-vcqct\" (UID: \"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\") " pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.588975 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cprtj\" (UniqueName: \"kubernetes.io/projected/0e7c96e4-c7fa-466f-b0b6-495612ed71f8-kube-api-access-cprtj\") pod \"multus-wlzrb\" (UID: \"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\") " 
pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.591878 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ncmd\" (UniqueName: \"kubernetes.io/projected/9736e597-ba61-47a5-b1e2-02b151c5cac0-kube-api-access-8ncmd\") pod \"multus-additional-cni-plugins-g8h9r\" (UID: \"9736e597-ba61-47a5-b1e2-02b151c5cac0\") " pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.598644 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578
bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.600074 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4r75\" (UniqueName: \"kubernetes.io/projected/37ae5d60-327b-4f2d-83c3-bd775960a7ad-kube-api-access-q4r75\") pod \"node-resolver-9zmf6\" (UID: \"37ae5d60-327b-4f2d-83c3-bd775960a7ad\") " pod="openshift-dns/node-resolver-9zmf6" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.613974 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.629081 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.639825 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.652320 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.660808 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.662525 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 
2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.669622 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" Nov 25 09:03:50 crc kubenswrapper[4687]: W1125 09:03:50.678667 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podac5dd7d0_d24d_411e_a7d0_3e921f218f4c.slice/crio-51e32f1344410c3e75ce637bdcc2caf937281be028df88c1c7aa33a984ee60bb WatchSource:0}: Error finding container 51e32f1344410c3e75ce637bdcc2caf937281be028df88c1c7aa33a984ee60bb: Status 404 returned error can't find the container with id 51e32f1344410c3e75ce637bdcc2caf937281be028df88c1c7aa33a984ee60bb Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.680226 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-wlzrb" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.680275 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.686616 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-9zmf6" Nov 25 09:03:50 crc kubenswrapper[4687]: W1125 09:03:50.723822 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e7c96e4_c7fa_466f_b0b6_495612ed71f8.slice/crio-e58e9b54eb14c351c28a33f93d428c3869c044846423669d1f089b25545d76c2 WatchSource:0}: Error finding container e58e9b54eb14c351c28a33f93d428c3869c044846423669d1f089b25545d76c2: Status 404 returned error can't find the container with id e58e9b54eb14c351c28a33f93d428c3869c044846423669d1f089b25545d76c2 Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.733667 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:03:50 crc kubenswrapper[4687]: E1125 09:03:50.733777 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.733836 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:03:50 crc kubenswrapper[4687]: E1125 09:03:50.733878 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.733913 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:03:50 crc kubenswrapper[4687]: E1125 09:03:50.733949 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.741702 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-p68hx"] Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.742404 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.745217 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.745567 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.746470 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.746905 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.746938 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.747060 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.747185 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.777904 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.800791 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.839106 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.867437 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.874277 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-slash\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.874313 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.874340 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-kubelet\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.874356 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-run-netns\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.874370 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-etc-openvswitch\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.874386 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9jb5\" (UniqueName: \"kubernetes.io/projected/d371271f-84c3-405c-b41f-604a06c1bb71-kube-api-access-l9jb5\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.874401 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d371271f-84c3-405c-b41f-604a06c1bb71-ovnkube-script-lib\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.874417 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-run-ovn\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.874429 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-cni-bin\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.874449 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-systemd-units\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.875053 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d371271f-84c3-405c-b41f-604a06c1bb71-ovnkube-config\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.875100 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d371271f-84c3-405c-b41f-604a06c1bb71-env-overrides\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.875137 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-var-lib-openvswitch\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.875159 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-log-socket\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.875180 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-run-ovn-kubernetes\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.875201 
4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-run-systemd\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.875226 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-run-openvswitch\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.875250 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-node-log\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.875273 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-cni-netd\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.875297 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d371271f-84c3-405c-b41f-604a06c1bb71-ovn-node-metrics-cert\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.887136 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.892909 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wlzrb" event={"ID":"0e7c96e4-c7fa-466f-b0b6-495612ed71f8","Type":"ContainerStarted","Data":"e58e9b54eb14c351c28a33f93d428c3869c044846423669d1f089b25545d76c2"} Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.895579 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-9zmf6" event={"ID":"37ae5d60-327b-4f2d-83c3-bd775960a7ad","Type":"ContainerStarted","Data":"f9359eb0ff7c62cec9bc5bb85ceef760bc2eb67f0fc041d6fe7a4ac5fbcc99bc"} Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.897858 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.900238 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" event={"ID":"9736e597-ba61-47a5-b1e2-02b151c5cac0","Type":"ContainerStarted","Data":"99ffc214cf8dc73cf0a0427ba0c85909d6681104152e8edfea58bc57311521db"} Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.902193 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerStarted","Data":"207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174"} Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.902242 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerStarted","Data":"51e32f1344410c3e75ce637bdcc2caf937281be028df88c1c7aa33a984ee60bb"} Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.907212 4687 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.909938 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.909972 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.909982 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.910085 4687 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.910667 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.916626 4687 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.917172 4687 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.918455 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.918483 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.918493 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.918523 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 
09:03:50.918536 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:50Z","lastTransitionTime":"2025-11-25T09:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.922151 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: E1125 09:03:50.934658 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.935104 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.945781 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.945813 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.945821 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.945835 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.945845 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:50Z","lastTransitionTime":"2025-11-25T09:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.961287 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"
mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.12
6.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: E1125 09:03:50.961449 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.969361 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.969387 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.969411 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.969426 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.969436 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:50Z","lastTransitionTime":"2025-11-25T09:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.976345 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-slash\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.976393 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.976410 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-kubelet\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.976426 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-run-netns\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.976455 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-etc-openvswitch\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.976490 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d371271f-84c3-405c-b41f-604a06c1bb71-ovnkube-script-lib\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.976540 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9jb5\" (UniqueName: \"kubernetes.io/projected/d371271f-84c3-405c-b41f-604a06c1bb71-kube-api-access-l9jb5\") pod \"ovnkube-node-p68hx\" (UID: 
\"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.976558 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-run-ovn\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.976574 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-cni-bin\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.976618 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-systemd-units\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.976638 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d371271f-84c3-405c-b41f-604a06c1bb71-ovnkube-config\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.976661 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d371271f-84c3-405c-b41f-604a06c1bb71-env-overrides\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.976697 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-var-lib-openvswitch\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.976722 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-log-socket\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.976850 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-run-ovn-kubernetes\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.976869 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-run-systemd\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 
09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.976887 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-run-openvswitch\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.976929 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-node-log\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.976949 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-cni-netd\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.976970 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d371271f-84c3-405c-b41f-604a06c1bb71-ovn-node-metrics-cert\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.977801 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.978025 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-systemd-units\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.978067 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-slash\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.978091 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.978116 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: 
\"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-kubelet\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.978148 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-run-netns\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.978172 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-etc-openvswitch\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.978491 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-run-ovn-kubernetes\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.978566 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-var-lib-openvswitch\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.978614 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-log-socket\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.978646 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-run-ovn\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.978873 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-cni-bin\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.978922 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-run-openvswitch\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.978925 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d371271f-84c3-405c-b41f-604a06c1bb71-ovnkube-script-lib\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.978958 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-run-systemd\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.978988 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-node-log\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.978988 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-cni-netd\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.979142 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d371271f-84c3-405c-b41f-604a06c1bb71-env-overrides\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.979149 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d371271f-84c3-405c-b41f-604a06c1bb71-ovnkube-config\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.982972 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d371271f-84c3-405c-b41f-604a06c1bb71-ovn-node-metrics-cert\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:50 crc kubenswrapper[4687]: E1125 09:03:50.983934 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3
d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.986883 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.987010 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.987100 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.987172 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.987265 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:50Z","lastTransitionTime":"2025-11-25T09:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.993869 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:50 crc kubenswrapper[4687]: I1125 09:03:50.995904 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9jb5\" (UniqueName: \"kubernetes.io/projected/d371271f-84c3-405c-b41f-604a06c1bb71-kube-api-access-l9jb5\") pod \"ovnkube-node-p68hx\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:51 crc kubenswrapper[4687]: E1125 09:03:51.003837 
4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.i
o/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f894
5c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4
643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:51Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.007806 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.007845 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.007854 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.007870 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.007882 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:51Z","lastTransitionTime":"2025-11-25T09:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.013022 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\
\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:51Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:51 crc kubenswrapper[4687]: E1125 09:03:51.021521 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:51Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:51 crc kubenswrapper[4687]: E1125 09:03:51.021929 4687 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.023362 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.023623 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.023887 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.024103 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.024314 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:51Z","lastTransitionTime":"2025-11-25T09:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.072957 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:51 crc kubenswrapper[4687]: W1125 09:03:51.085232 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd371271f_84c3_405c_b41f_604a06c1bb71.slice/crio-45c46ddac61f3279e4ffbb7538198ae91eb749e696cf641bc4651d88a88977f6 WatchSource:0}: Error finding container 45c46ddac61f3279e4ffbb7538198ae91eb749e696cf641bc4651d88a88977f6: Status 404 returned error can't find the container with id 45c46ddac61f3279e4ffbb7538198ae91eb749e696cf641bc4651d88a88977f6 Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.127357 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.127398 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.127412 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.127457 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.127472 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:51Z","lastTransitionTime":"2025-11-25T09:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.229789 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.229830 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.229839 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.229854 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.229863 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:51Z","lastTransitionTime":"2025-11-25T09:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.332555 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.332594 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.332602 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.332616 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.332630 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:51Z","lastTransitionTime":"2025-11-25T09:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.434976 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.435043 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.435062 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.435092 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.435111 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:51Z","lastTransitionTime":"2025-11-25T09:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.537660 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.537721 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.537741 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.537768 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.537786 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:51Z","lastTransitionTime":"2025-11-25T09:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.640745 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.640804 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.640821 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.640844 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.640867 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:51Z","lastTransitionTime":"2025-11-25T09:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.743922 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.743971 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.743981 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.743994 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.744003 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:51Z","lastTransitionTime":"2025-11-25T09:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.847000 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.847069 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.847087 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.847109 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.847126 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:51Z","lastTransitionTime":"2025-11-25T09:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.911168 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-9zmf6" event={"ID":"37ae5d60-327b-4f2d-83c3-bd775960a7ad","Type":"ContainerStarted","Data":"5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1"} Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.914017 4687 generic.go:334] "Generic (PLEG): container finished" podID="9736e597-ba61-47a5-b1e2-02b151c5cac0" containerID="23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d" exitCode=0 Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.914344 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" event={"ID":"9736e597-ba61-47a5-b1e2-02b151c5cac0","Type":"ContainerDied","Data":"23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d"} Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.916353 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerStarted","Data":"654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8"} Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.918059 4687 generic.go:334] "Generic (PLEG): container finished" podID="d371271f-84c3-405c-b41f-604a06c1bb71" containerID="134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05" exitCode=0 Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.918140 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerDied","Data":"134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05"} Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.918170 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerStarted","Data":"45c46ddac61f3279e4ffbb7538198ae91eb749e696cf641bc4651d88a88977f6"} Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.923389 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wlzrb" 
event={"ID":"0e7c96e4-c7fa-466f-b0b6-495612ed71f8","Type":"ContainerStarted","Data":"42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45"} Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.933689 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6
355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:51Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.950918 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.950988 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.951011 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.951040 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.951063 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:51Z","lastTransitionTime":"2025-11-25T09:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.957447 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:51Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:51 crc kubenswrapper[4687]: I1125 09:03:51.979244 4687 
Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.009202 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z"
Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.022553 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z"
Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.039956 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z"
Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.054565 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.054604 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.054613 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.054631 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.054643 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:52Z","lastTransitionTime":"2025-11-25T09:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.055323 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z"
Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.067185 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z"
Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.081871 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z"
Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.096709 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z"
Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.107203 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z"
Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.120850 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z"
Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.139175 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.152731 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.158262 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 
09:03:52.158288 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.158298 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.158311 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.158321 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:52Z","lastTransitionTime":"2025-11-25T09:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.170279 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resou
rces\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.181325 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.194300 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.206901 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.218759 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.234380 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.246215 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.258631 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.259982 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.260011 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.260020 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.260035 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.260047 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:52Z","lastTransitionTime":"2025-11-25T09:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.289055 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e83
3b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.299759 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-pjj4b"] Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.301221 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-pjj4b" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.306626 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.306644 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.306761 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.306791 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.307953 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"
volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.324535 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc 
kubenswrapper[4687]: I1125 09:03:52.338098 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.349372 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\
\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.362137 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc 
kubenswrapper[4687]: I1125 09:03:52.362707 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.362808 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.362912 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.363118 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.363280 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:52Z","lastTransitionTime":"2025-11-25T09:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.373908 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",
\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.385053 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.396459 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.398145 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.398281 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/c8f16526-df37-4a3b-9fc6-010c96296946-serviceca\") pod \"node-ca-pjj4b\" (UID: \"c8f16526-df37-4a3b-9fc6-010c96296946\") " pod="openshift-image-registry/node-ca-pjj4b" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.398358 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.398423 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c8f16526-df37-4a3b-9fc6-010c96296946-host\") pod \"node-ca-pjj4b\" (UID: \"c8f16526-df37-4a3b-9fc6-010c96296946\") " pod="openshift-image-registry/node-ca-pjj4b" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.398487 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.398576 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.398653 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.398720 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbqmj\" (UniqueName: \"kubernetes.io/projected/c8f16526-df37-4a3b-9fc6-010c96296946-kube-api-access-dbqmj\") pod \"node-ca-pjj4b\" (UID: \"c8f16526-df37-4a3b-9fc6-010c96296946\") " pod="openshift-image-registry/node-ca-pjj4b" Nov 25 09:03:52 crc kubenswrapper[4687]: E1125 09:03:52.398861 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:04:00.398847294 +0000 UTC m=+35.452487012 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:03:52 crc kubenswrapper[4687]: E1125 09:03:52.398968 4687 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:03:52 crc kubenswrapper[4687]: E1125 09:03:52.399054 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:04:00.399046619 +0000 UTC m=+35.452686337 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:03:52 crc kubenswrapper[4687]: E1125 09:03:52.399379 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:03:52 crc kubenswrapper[4687]: E1125 09:03:52.399472 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:03:52 crc kubenswrapper[4687]: E1125 09:03:52.399578 4687 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:03:52 crc kubenswrapper[4687]: E1125 09:03:52.399654 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:04:00.399644705 +0000 UTC m=+35.453284413 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:03:52 crc kubenswrapper[4687]: E1125 09:03:52.399766 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:03:52 crc kubenswrapper[4687]: E1125 09:03:52.399831 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:03:52 crc kubenswrapper[4687]: E1125 09:03:52.399888 4687 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:03:52 crc kubenswrapper[4687]: E1125 09:03:52.399955 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:04:00.399947143 +0000 UTC m=+35.453586861 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:03:52 crc kubenswrapper[4687]: E1125 09:03:52.400051 4687 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:03:52 crc kubenswrapper[4687]: E1125 09:03:52.400122 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:04:00.400113907 +0000 UTC m=+35.453753625 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.408882 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.421190 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.432175 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.447785 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.463476 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.465650 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 
09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.465752 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.465810 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.465874 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.465945 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:52Z","lastTransitionTime":"2025-11-25T09:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.482292 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.497470 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.499784 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbqmj\" (UniqueName: \"kubernetes.io/projected/c8f16526-df37-4a3b-9fc6-010c96296946-kube-api-access-dbqmj\") pod \"node-ca-pjj4b\" (UID: \"c8f16526-df37-4a3b-9fc6-010c96296946\") " pod="openshift-image-registry/node-ca-pjj4b" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.499905 4687 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/c8f16526-df37-4a3b-9fc6-010c96296946-serviceca\") pod \"node-ca-pjj4b\" (UID: \"c8f16526-df37-4a3b-9fc6-010c96296946\") " pod="openshift-image-registry/node-ca-pjj4b" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.499977 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c8f16526-df37-4a3b-9fc6-010c96296946-host\") pod \"node-ca-pjj4b\" (UID: \"c8f16526-df37-4a3b-9fc6-010c96296946\") " pod="openshift-image-registry/node-ca-pjj4b" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.500069 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c8f16526-df37-4a3b-9fc6-010c96296946-host\") pod \"node-ca-pjj4b\" (UID: \"c8f16526-df37-4a3b-9fc6-010c96296946\") " pod="openshift-image-registry/node-ca-pjj4b" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.502062 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/c8f16526-df37-4a3b-9fc6-010c96296946-serviceca\") pod \"node-ca-pjj4b\" (UID: \"c8f16526-df37-4a3b-9fc6-010c96296946\") " pod="openshift-image-registry/node-ca-pjj4b" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.513906 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.526820 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbqmj\" (UniqueName: \"kubernetes.io/projected/c8f16526-df37-4a3b-9fc6-010c96296946-kube-api-access-dbqmj\") pod \"node-ca-pjj4b\" (UID: \"c8f16526-df37-4a3b-9fc6-010c96296946\") " pod="openshift-image-registry/node-ca-pjj4b" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.533628 4687 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.533992 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z 
is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.534417 4687 scope.go:117] "RemoveContainer" containerID="9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1" Nov 25 09:03:52 crc kubenswrapper[4687]: E1125 09:03:52.534621 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.568626 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.568697 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.568710 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.568736 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.568768 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:52Z","lastTransitionTime":"2025-11-25T09:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.630234 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-pjj4b" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.673458 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.673635 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.673705 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.673770 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.673831 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:52Z","lastTransitionTime":"2025-11-25T09:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.734697 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.734789 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:03:52 crc kubenswrapper[4687]: E1125 09:03:52.734973 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:03:52 crc kubenswrapper[4687]: E1125 09:03:52.735088 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.734952 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:03:52 crc kubenswrapper[4687]: E1125 09:03:52.735363 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.783759 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.783791 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.783801 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.783830 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.783840 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:52Z","lastTransitionTime":"2025-11-25T09:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.886668 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.886730 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.886741 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.886764 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.886777 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:52Z","lastTransitionTime":"2025-11-25T09:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.930422 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerStarted","Data":"4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f"} Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.930480 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerStarted","Data":"cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f"} Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.930522 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerStarted","Data":"66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6"} Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.930540 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerStarted","Data":"d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d"} Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.930561 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerStarted","Data":"b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858"} Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.932670 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" event={"ID":"9736e597-ba61-47a5-b1e2-02b151c5cac0","Type":"ContainerStarted","Data":"df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c"} Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.933602 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-pjj4b" event={"ID":"c8f16526-df37-4a3b-9fc6-010c96296946","Type":"ContainerStarted","Data":"00fa9dc5253730905ddc88b8ece330d3bea637015e4c33805c34585da4a175cf"} Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.946943 4687 
status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.983206 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/
var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath
\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.989859 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.990104 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.990230 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.990351 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:52 crc kubenswrapper[4687]: I1125 09:03:52.990474 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:52Z","lastTransitionTime":"2025-11-25T09:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.001103 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.014094 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:53Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.026311 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:53Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.047268 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:53Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.058626 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:53Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.068489 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:53Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.081856 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:53Z is after 2025-08-24T17:21:41Z"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.095105 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.095135 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.095143 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.095157 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.095166 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:53Z","lastTransitionTime":"2025-11-25T09:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.095704 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:53Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.109348 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:53Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.121309 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:53Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.142075 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:53Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.153868 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T09:03:53Z is after 2025-08-24T17:21:41Z"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.197806 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.197842 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.197854 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.197875 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.197887 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:53Z","lastTransitionTime":"2025-11-25T09:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.301381 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.301752 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.301825 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.301897 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.301972 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:53Z","lastTransitionTime":"2025-11-25T09:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.404539 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.404571 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.404579 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.404596 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.404606 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:53Z","lastTransitionTime":"2025-11-25T09:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.509128 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.509707 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.509721 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.509744 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.509762 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:53Z","lastTransitionTime":"2025-11-25T09:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.612260 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.612309 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.612324 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.612342 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.612356 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:53Z","lastTransitionTime":"2025-11-25T09:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.714148 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.714218 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.714234 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.714257 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.714273 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:53Z","lastTransitionTime":"2025-11-25T09:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.816532 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.816606 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.816630 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.816660 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.816683 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:53Z","lastTransitionTime":"2025-11-25T09:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.942200 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.942230 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.942240 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.942254 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.942266 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:53Z","lastTransitionTime":"2025-11-25T09:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.944982 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerStarted","Data":"61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b"}
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.946034 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-pjj4b" event={"ID":"c8f16526-df37-4a3b-9fc6-010c96296946","Type":"ContainerStarted","Data":"5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8"}
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.948409 4687 generic.go:334] "Generic (PLEG): container finished" podID="9736e597-ba61-47a5-b1e2-02b151c5cac0" containerID="df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c" exitCode=0
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.948454 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" event={"ID":"9736e597-ba61-47a5-b1e2-02b151c5cac0","Type":"ContainerDied","Data":"df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c"}
Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.974897 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:53Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:53 crc kubenswrapper[4687]: I1125 09:03:53.990451 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:53Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.014805 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.039936 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z 
is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.044226 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.044260 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.044271 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.044285 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.044299 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:54Z","lastTransitionTime":"2025-11-25T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.053305 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699
a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.076992 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.091306 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.103954 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.114880 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.126075 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.137374 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.146977 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.147023 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.147033 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.147050 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.147061 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:54Z","lastTransitionTime":"2025-11-25T09:03:54Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.152613 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.162409 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.171547 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.184050 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner 
reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.195149 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.223801 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.237523 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.249632 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.249666 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.249674 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.249689 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.249698 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:54Z","lastTransitionTime":"2025-11-25T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.252687 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.267630 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.279627 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\
"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.292005 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.308746 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.326485 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.340065 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.352303 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.352346 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.352358 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.352375 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.352386 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:54Z","lastTransitionTime":"2025-11-25T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.353928 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.372086 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.386074 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.454727 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.454815 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.454838 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.455339 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.455694 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:54Z","lastTransitionTime":"2025-11-25T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.558474 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.558570 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.558589 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.558614 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.558633 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:54Z","lastTransitionTime":"2025-11-25T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.661392 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.661459 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.661476 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.661538 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.661556 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:54Z","lastTransitionTime":"2025-11-25T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.734687 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.734753 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.734687 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:03:54 crc kubenswrapper[4687]: E1125 09:03:54.734913 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:03:54 crc kubenswrapper[4687]: E1125 09:03:54.735112 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:03:54 crc kubenswrapper[4687]: E1125 09:03:54.735263 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.764103 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.764147 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.764165 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.764188 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.764204 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:54Z","lastTransitionTime":"2025-11-25T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.866552 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.866614 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.866632 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.866655 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.866672 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:54Z","lastTransitionTime":"2025-11-25T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.954470 4687 generic.go:334] "Generic (PLEG): container finished" podID="9736e597-ba61-47a5-b1e2-02b151c5cac0" containerID="365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2" exitCode=0 Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.954544 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" event={"ID":"9736e597-ba61-47a5-b1e2-02b151c5cac0","Type":"ContainerDied","Data":"365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2"} Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.968902 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.968967 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.968985 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.969012 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.969029 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:54Z","lastTransitionTime":"2025-11-25T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.977177 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:54 crc kubenswrapper[4687]: I1125 09:03:54.998904 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.016737 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.035641 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.057768 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.072904 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.072990 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.073019 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.073052 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.073075 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:55Z","lastTransitionTime":"2025-11-25T09:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.077878 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.093688 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.113547 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.134082 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.153649 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.176486 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.176596 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.176614 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.176641 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.176659 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:55Z","lastTransitionTime":"2025-11-25T09:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.181450 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e83
3b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.195068 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\
\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.213773 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.261796 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"
/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.279184 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.279232 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.279245 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.279264 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.279278 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:55Z","lastTransitionTime":"2025-11-25T09:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.381624 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.381667 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.381680 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.381696 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.381709 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:55Z","lastTransitionTime":"2025-11-25T09:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.484190 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.484246 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.484262 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.484283 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.484301 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:55Z","lastTransitionTime":"2025-11-25T09:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.587109 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.587170 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.587191 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.587219 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.587239 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:55Z","lastTransitionTime":"2025-11-25T09:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.690462 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.690561 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.690585 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.690614 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.690635 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:55Z","lastTransitionTime":"2025-11-25T09:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.759170 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name
\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.783963 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.793417 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.793451 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.793462 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.793480 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.793493 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:55Z","lastTransitionTime":"2025-11-25T09:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.809837 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.827097 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.846251 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.859735 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.871260 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.886399 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.895527 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.895594 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.895613 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.895636 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.895655 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:55Z","lastTransitionTime":"2025-11-25T09:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.905185 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.918707 4687 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.937807 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.954788 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\
\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.961404 4687 generic.go:334] "Generic (PLEG): container finished" podID="9736e597-ba61-47a5-b1e2-02b151c5cac0" containerID="4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29" exitCode=0 Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.961444 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" event={"ID":"9736e597-ba61-47a5-b1e2-02b151c5cac0","Type":"ContainerDied","Data":"4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29"} Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.973111 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:55 crc kubenswrapper[4687]: I1125 09:03:55.994078 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"
/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.000862 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.000923 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.000942 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.000968 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.000986 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:56Z","lastTransitionTime":"2025-11-25T09:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.013751 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.037740 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.053458 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.074918 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:56Z 
is after 2025-08-24T17:21:41Z" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.086757 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.102420 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveRe
adOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00f
cf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.103225 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.103290 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.103309 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.103334 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:56 crc 
kubenswrapper[4687]: I1125 09:03:56.103350 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:56Z","lastTransitionTime":"2025-11-25T09:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.116248 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":
\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.128411 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:56Z is 
after 2025-08-24T17:21:41Z" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.136766 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.148357 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.161031 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.173165 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.184394 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.195755 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.205122 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.205173 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.205189 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.205207 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.205222 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:56Z","lastTransitionTime":"2025-11-25T09:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.307689 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.307727 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.307736 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.307750 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.307760 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:56Z","lastTransitionTime":"2025-11-25T09:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.410949 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.411015 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.411033 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.411057 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.411078 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:56Z","lastTransitionTime":"2025-11-25T09:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.513746 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.513782 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.513792 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.513807 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.513820 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:56Z","lastTransitionTime":"2025-11-25T09:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.616585 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.616657 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.616689 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.616716 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.616740 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:56Z","lastTransitionTime":"2025-11-25T09:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.720325 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.720401 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.720420 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.720445 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.720472 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:56Z","lastTransitionTime":"2025-11-25T09:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.734678 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.734740 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.734769 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:03:56 crc kubenswrapper[4687]: E1125 09:03:56.734802 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
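The stretch of log above repeats the same readiness failure about every 100 ms: the kubelet holds the node NotReady because the container runtime reports NetworkPluginNotReady, meaning no CNI configuration file exists yet in /etc/kubernetes/cni/net.d/. That is expected at this point in the boot, since the ovnkube-node pod that would write the OVN-Kubernetes CNI config is still shown as PodInitializing in the entries that follow. Here is a minimal Go sketch of this kind of check; the directory path comes from the log itself, while the accepted extensions (*.conf, *.conflist, *.json) and everything else are illustrative assumptions, not kubelet source.

// cni_ready_check.go
//
// Illustrative sketch only: a minimal reproduction of the readiness test
// implied by the repeated "no CNI configuration file in
// /etc/kubernetes/cni/net.d/" message above. Not kubelet code.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cniConfigPresent reports whether the CNI conf dir contains at least one
// network configuration file. The extension list is an assumption based on
// common CNI conventions.
func cniConfigPresent(confDir string) (bool, error) {
	entries, err := os.ReadDir(confDir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // path taken from the log message
	ok, err := cniConfigPresent(confDir)
	if err != nil || !ok {
		// Mirrors the condition kubelet keeps reporting above:
		// NetworkReady=false reason:NetworkPluginNotReady
		fmt.Printf("network not ready: no CNI configuration file in %s (err=%v)\n", confDir, err)
		return
	}
	fmt.Println("NetworkReady=true")
}

Run against a healthy node this prints NetworkReady=true as soon as the network provider has written its config; on the node logged here it would keep printing the not-ready message until ovnkube-controller comes up and drops a conflist into the directory.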
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:03:56 crc kubenswrapper[4687]: E1125 09:03:56.734866 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:03:56 crc kubenswrapper[4687]: E1125 09:03:56.734977 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.823207 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.823269 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.823292 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.823321 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.823343 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:56Z","lastTransitionTime":"2025-11-25T09:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.925880 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.925920 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.925930 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.925947 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.925958 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:56Z","lastTransitionTime":"2025-11-25T09:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.970680 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerStarted","Data":"6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682"} Nov 25 09:03:56 crc kubenswrapper[4687]: I1125 09:03:56.975324 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" event={"ID":"9736e597-ba61-47a5-b1e2-02b151c5cac0","Type":"ContainerStarted","Data":"35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77"} Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.001913 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.029499 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.029623 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.029641 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:57 crc 
kubenswrapper[4687]: I1125 09:03:57.029665 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.029682 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:57Z","lastTransitionTime":"2025-11-25T09:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.032498 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:57Z 
is after 2025-08-24T17:21:41Z" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.049274 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.069105 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveRe
adOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00f
cf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.086303 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.098816 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.114208 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.132878 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.132938 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.132956 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.132978 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.132995 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:57Z","lastTransitionTime":"2025-11-25T09:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.134323 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.151245 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.170264 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.189143 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.207259 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.227191 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.235720 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.235760 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.235771 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.235788 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.235799 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:57Z","lastTransitionTime":"2025-11-25T09:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.245299 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.338443 4687 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.338531 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.338543 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.338562 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.338574 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:57Z","lastTransitionTime":"2025-11-25T09:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.441780 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.441839 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.441858 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.441883 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.441900 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:57Z","lastTransitionTime":"2025-11-25T09:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.544546 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.544636 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.544668 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.544694 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.544712 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:57Z","lastTransitionTime":"2025-11-25T09:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.647535 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.647598 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.647615 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.647639 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.647657 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:57Z","lastTransitionTime":"2025-11-25T09:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.750844 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.750902 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.750919 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.750942 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.750964 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:57Z","lastTransitionTime":"2025-11-25T09:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.854210 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.854256 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.854266 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.854282 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.854294 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:57Z","lastTransitionTime":"2025-11-25T09:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.956712 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.956804 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.956823 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.956848 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:57 crc kubenswrapper[4687]: I1125 09:03:57.956869 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:57Z","lastTransitionTime":"2025-11-25T09:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.059184 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.059253 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.059271 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.059297 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.059314 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:58Z","lastTransitionTime":"2025-11-25T09:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.162063 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.162108 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.162124 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.162146 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.162163 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:58Z","lastTransitionTime":"2025-11-25T09:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.265690 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.265753 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.265769 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.265794 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.265812 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:58Z","lastTransitionTime":"2025-11-25T09:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.368646 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.368705 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.368722 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.368748 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.368765 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:58Z","lastTransitionTime":"2025-11-25T09:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.472186 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.472629 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.472643 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.472659 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.472672 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:58Z","lastTransitionTime":"2025-11-25T09:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.576050 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.576123 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.576139 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.576162 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.576183 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:58Z","lastTransitionTime":"2025-11-25T09:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.679103 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.679166 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.679188 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.679213 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.679231 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:58Z","lastTransitionTime":"2025-11-25T09:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.734266 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.734358 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:03:58 crc kubenswrapper[4687]: E1125 09:03:58.734463 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.734439 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:03:58 crc kubenswrapper[4687]: E1125 09:03:58.734680 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:03:58 crc kubenswrapper[4687]: E1125 09:03:58.734917 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.782088 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.782145 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.782163 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.782182 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.782194 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:58Z","lastTransitionTime":"2025-11-25T09:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.885162 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.885243 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.885266 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.885301 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.885324 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:58Z","lastTransitionTime":"2025-11-25T09:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.987462 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.987575 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.987599 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.987635 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.987659 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:58Z","lastTransitionTime":"2025-11-25T09:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.987825 4687 generic.go:334] "Generic (PLEG): container finished" podID="9736e597-ba61-47a5-b1e2-02b151c5cac0" containerID="35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77" exitCode=0 Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.987900 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" event={"ID":"9736e597-ba61-47a5-b1e2-02b151c5cac0","Type":"ContainerDied","Data":"35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77"} Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.996967 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerStarted","Data":"cd9fa26b1a492912a1cdf9c61faafa46bb665a4698338d5bae45164383977451"} Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.997569 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.997677 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:58 crc kubenswrapper[4687]: I1125 09:03:58.997857 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.008550 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.031264 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.047969 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.049323 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.051107 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.068280 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.082064 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.090289 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.091564 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.091590 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.091609 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.091626 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:59Z","lastTransitionTime":"2025-11-25T09:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.101382 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.117579 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.135275 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner 
reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.155364 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.170155 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.195203 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.195824 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.195985 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.196120 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.195352 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.196244 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:59Z","lastTransitionTime":"2025-11-25T09:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.210543 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.228307 4687 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef
7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\
\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.246194 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.263152 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.274536 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.295214 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.299754 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.299842 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.299864 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.299899 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.299923 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:59Z","lastTransitionTime":"2025-11-25T09:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.313815 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.340756 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.355942 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.367845 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.383207 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.401266 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.405982 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.406021 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.406031 4687 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.406048 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.406061 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:59Z","lastTransitionTime":"2025-11-25T09:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.418398 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.445667 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9fa26b1a492912a1cdf9c61faafa46bb665a4698338d5bae45164383977451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.457336 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.476218 4687 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef
7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\
\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.492974 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:03:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.508484 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.508529 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.508539 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.508555 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.508565 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:59Z","lastTransitionTime":"2025-11-25T09:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.610954 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.610995 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.611007 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.611024 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.611036 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:59Z","lastTransitionTime":"2025-11-25T09:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.713564 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.713612 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.713623 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.713641 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.713652 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:59Z","lastTransitionTime":"2025-11-25T09:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.817949 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.818000 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.818011 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.818029 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.818040 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:59Z","lastTransitionTime":"2025-11-25T09:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.974225 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.974269 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.974282 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.974300 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:03:59 crc kubenswrapper[4687]: I1125 09:03:59.974314 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:03:59Z","lastTransitionTime":"2025-11-25T09:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.007845 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" event={"ID":"9736e597-ba61-47a5-b1e2-02b151c5cac0","Type":"ContainerStarted","Data":"2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f"} Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.076818 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.076872 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.076900 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.076922 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.076936 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:00Z","lastTransitionTime":"2025-11-25T09:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.179678 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.179720 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.179731 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.179746 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.179759 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:00Z","lastTransitionTime":"2025-11-25T09:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.282171 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.282217 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.282228 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.282245 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.282257 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:00Z","lastTransitionTime":"2025-11-25T09:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.384151 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.384196 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.384222 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.384248 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.384264 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:00Z","lastTransitionTime":"2025-11-25T09:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.399074 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:04:00 crc kubenswrapper[4687]: E1125 09:04:00.399209 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:04:16.399184758 +0000 UTC m=+51.452824476 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.399381 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:00 crc kubenswrapper[4687]: E1125 09:04:00.399493 4687 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:04:00 crc kubenswrapper[4687]: E1125 09:04:00.399572 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:04:16.399555347 +0000 UTC m=+51.453195085 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.486883 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.486916 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.486926 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.486944 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.486955 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:00Z","lastTransitionTime":"2025-11-25T09:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.500637 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.500688 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.500718 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:00 crc kubenswrapper[4687]: E1125 09:04:00.501213 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:04:00 crc kubenswrapper[4687]: E1125 09:04:00.501254 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:04:00 crc kubenswrapper[4687]: E1125 09:04:00.501256 4687 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:04:00 crc kubenswrapper[4687]: E1125 09:04:00.501367 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:04:16.501341416 +0000 UTC m=+51.554981164 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:04:00 crc kubenswrapper[4687]: E1125 09:04:00.501270 4687 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:04:00 crc kubenswrapper[4687]: E1125 09:04:00.501899 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. 
No retries permitted until 2025-11-25 09:04:16.501855469 +0000 UTC m=+51.555495187 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:04:00 crc kubenswrapper[4687]: E1125 09:04:00.501230 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:04:00 crc kubenswrapper[4687]: E1125 09:04:00.503590 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:04:00 crc kubenswrapper[4687]: E1125 09:04:00.503603 4687 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:04:00 crc kubenswrapper[4687]: E1125 09:04:00.503645 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:04:16.503633367 +0000 UTC m=+51.557273085 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.589938 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.590026 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.590053 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.590085 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.590110 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:00Z","lastTransitionTime":"2025-11-25T09:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.692712 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.692758 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.692775 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.692797 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.692813 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:00Z","lastTransitionTime":"2025-11-25T09:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.734373 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.734455 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:00 crc kubenswrapper[4687]: E1125 09:04:00.734577 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:00 crc kubenswrapper[4687]: E1125 09:04:00.734687 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.734777 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:00 crc kubenswrapper[4687]: E1125 09:04:00.734853 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.794733 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.794773 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.794786 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.794805 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.794823 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:00Z","lastTransitionTime":"2025-11-25T09:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.897835 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.897890 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.897903 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.897924 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:00 crc kubenswrapper[4687]: I1125 09:04:00.897936 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:00Z","lastTransitionTime":"2025-11-25T09:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.000007 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.000082 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.000102 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.000126 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.000144 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:01Z","lastTransitionTime":"2025-11-25T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.016338 4687 generic.go:334] "Generic (PLEG): container finished" podID="9736e597-ba61-47a5-b1e2-02b151c5cac0" containerID="2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f" exitCode=0 Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.016422 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" event={"ID":"9736e597-ba61-47a5-b1e2-02b151c5cac0","Type":"ContainerDied","Data":"2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f"} Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.045346 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.068033 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.085362 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.105871 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.105933 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.105944 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.105963 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.105974 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:01Z","lastTransitionTime":"2025-11-25T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.120665 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9fa26b1a492912a1cdf9c61faafa46bb665a4698338d5bae45164383977451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\
"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.137288 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.153611 4687 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a168
8df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"
/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.172742 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.176987 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.177018 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.177029 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.177045 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.177057 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:01Z","lastTransitionTime":"2025-11-25T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.190805 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.16
8.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:01 crc kubenswrapper[4687]: E1125 09:04:01.193724 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.199826 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.199875 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.199898 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.199926 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.199953 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:01Z","lastTransitionTime":"2025-11-25T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.207716 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T09:04:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:01 crc kubenswrapper[4687]: E1125 09:04:01.215060 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.219531 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.219564 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.219575 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.219591 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.219602 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:01Z","lastTransitionTime":"2025-11-25T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.228188 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-c
erts\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:01 crc kubenswrapper[4687]: E1125 09:04:01.240253 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3
d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.242219 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:01Z is after 2025-08-24T17:21:41Z"
Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.245022 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.245058 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.245071 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.245090 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.245107 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:01Z","lastTransitionTime":"2025-11-25T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.260369 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:01 crc kubenswrapper[4687]: E1125 09:04:01.265163 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3
d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.269616 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.269676 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.269699 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.269725 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.269743 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:01Z","lastTransitionTime":"2025-11-25T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.277420 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:01 crc kubenswrapper[4687]: E1125 09:04:01.287655 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:01 crc kubenswrapper[4687]: E1125 09:04:01.287801 4687 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.289910 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.289954 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.289966 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.289984 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.290000 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:01Z","lastTransitionTime":"2025-11-25T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.292840 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.392665 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.392740 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.392765 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.392792 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.392812 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:01Z","lastTransitionTime":"2025-11-25T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.496118 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.496185 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.496210 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.496241 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.496268 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:01Z","lastTransitionTime":"2025-11-25T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.602111 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.602177 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.602202 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.602231 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.602253 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:01Z","lastTransitionTime":"2025-11-25T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.705797 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.705932 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.705997 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.706030 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.706071 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:01Z","lastTransitionTime":"2025-11-25T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.808449 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.808521 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.808532 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.808549 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.808564 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:01Z","lastTransitionTime":"2025-11-25T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.911167 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.911212 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.911223 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.911240 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:01 crc kubenswrapper[4687]: I1125 09:04:01.911252 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:01Z","lastTransitionTime":"2025-11-25T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.013645 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.013699 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.013708 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.013725 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.013734 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:02Z","lastTransitionTime":"2025-11-25T09:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.022668 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" event={"ID":"9736e597-ba61-47a5-b1e2-02b151c5cac0","Type":"ContainerStarted","Data":"6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee"} Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.041298 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.059104 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9fa26b1a492912a1cdf9c61faafa46bb665a4698338d5bae45164383977451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.069202 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.082228 4687 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.096131 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.107070 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.116684 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.116746 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.116762 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:02 crc 
kubenswrapper[4687]: I1125 09:04:02.116788 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.116803 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:02Z","lastTransitionTime":"2025-11-25T09:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.129163 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.147011 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.164255 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.182879 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.200451 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.215414 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.220223 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.220268 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.220279 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.220300 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.220317 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:02Z","lastTransitionTime":"2025-11-25T09:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.235034 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.253792 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.322714 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.322787 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.322806 4687 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.322830 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.322847 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:02Z","lastTransitionTime":"2025-11-25T09:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.430955 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.431029 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.431067 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.431102 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.431125 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:02Z","lastTransitionTime":"2025-11-25T09:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.534629 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.534670 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.534681 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.534697 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.534708 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:02Z","lastTransitionTime":"2025-11-25T09:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.637191 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.637239 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.637251 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.637266 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.637275 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:02Z","lastTransitionTime":"2025-11-25T09:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.733725 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.733776 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:02 crc kubenswrapper[4687]: E1125 09:04:02.734085 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:02 crc kubenswrapper[4687]: E1125 09:04:02.734686 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.734747 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:02 crc kubenswrapper[4687]: E1125 09:04:02.734827 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.739839 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.739878 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.739887 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.739934 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.739946 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:02Z","lastTransitionTime":"2025-11-25T09:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.842255 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.842311 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.842330 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.842352 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.842371 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:02Z","lastTransitionTime":"2025-11-25T09:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.946468 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.946558 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.946573 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.946591 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:02 crc kubenswrapper[4687]: I1125 09:04:02.946605 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:02Z","lastTransitionTime":"2025-11-25T09:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.003069 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn"] Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.003490 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.004846 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.005826 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.018061 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.031716 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.045735 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.049628 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.049684 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.049700 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.049721 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.049735 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:03Z","lastTransitionTime":"2025-11-25T09:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.058913 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.068040 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.077024 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.090856 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.105718 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.121614 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.128200 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-xg6dn\" (UID: \"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.128267 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-xg6dn\" (UID: \"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.128377 4687 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h78z2\" (UniqueName: \"kubernetes.io/projected/2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d-kube-api-access-h78z2\") pod \"ovnkube-control-plane-749d76644c-xg6dn\" (UID: \"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.128435 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d-env-overrides\") pod \"ovnkube-control-plane-749d76644c-xg6dn\" (UID: \"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.137069 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/
kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.152791 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.152836 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.152848 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.152867 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.152883 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:03Z","lastTransitionTime":"2025-11-25T09:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.166977 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.214680 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9fa26b1a492912a1cdf9c61faafa46bb665a46
98338d5bae45164383977451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.229065 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-xg6dn\" (UID: \"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.229119 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-xg6dn\" (UID: \"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.229163 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h78z2\" (UniqueName: \"kubernetes.io/projected/2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d-kube-api-access-h78z2\") pod \"ovnkube-control-plane-749d76644c-xg6dn\" (UID: \"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.229204 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d-env-overrides\") pod \"ovnkube-control-plane-749d76644c-xg6dn\" (UID: \"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.230136 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d-env-overrides\") pod \"ovnkube-control-plane-749d76644c-xg6dn\" (UID: \"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.230317 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-xg6dn\" (UID: \"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.230929 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\
"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.235675 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-xg6dn\" (UID: \"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.251223 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.253835 4687 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-h78z2\" (UniqueName: \"kubernetes.io/projected/2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d-kube-api-access-h78z2\") pod \"ovnkube-control-plane-749d76644c-xg6dn\" (UID: \"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.255010 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.255045 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.255058 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.255077 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.255093 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:03Z","lastTransitionTime":"2025-11-25T09:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.268153 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"host
IPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\
"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a247
3a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.317364 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" Nov 25 09:04:03 crc kubenswrapper[4687]: W1125 09:04:03.332685 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2d9c3b7b_52fe_4bc4_ae61_2ab61dc1ac0d.slice/crio-b826b303cd2123a0496e63074bb53374a406b7d2e4bc9ff3d8596ad0d325548d WatchSource:0}: Error finding container b826b303cd2123a0496e63074bb53374a406b7d2e4bc9ff3d8596ad0d325548d: Status 404 returned error can't find the container with id b826b303cd2123a0496e63074bb53374a406b7d2e4bc9ff3d8596ad0d325548d Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.358781 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.358823 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.358837 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.358858 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.358879 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:03Z","lastTransitionTime":"2025-11-25T09:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.470969 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.471025 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.471039 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.471059 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.471073 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:03Z","lastTransitionTime":"2025-11-25T09:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.574097 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.574143 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.574153 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.574168 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.574178 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:03Z","lastTransitionTime":"2025-11-25T09:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.676904 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.676959 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.676976 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.677002 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.677021 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:03Z","lastTransitionTime":"2025-11-25T09:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.779404 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.779456 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.779466 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.779483 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.779494 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:03Z","lastTransitionTime":"2025-11-25T09:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.883731 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.883813 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.883830 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.883856 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.883874 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:03Z","lastTransitionTime":"2025-11-25T09:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.986796 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.986864 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.986881 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.986906 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:03 crc kubenswrapper[4687]: I1125 09:04:03.986923 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:03Z","lastTransitionTime":"2025-11-25T09:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.047568 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" event={"ID":"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d","Type":"ContainerStarted","Data":"3d8fb345623a1675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70"} Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.047641 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" event={"ID":"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d","Type":"ContainerStarted","Data":"1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72"} Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.047661 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" event={"ID":"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d","Type":"ContainerStarted","Data":"b826b303cd2123a0496e63074bb53374a406b7d2e4bc9ff3d8596ad0d325548d"} Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.050039 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-p68hx_d371271f-84c3-405c-b41f-604a06c1bb71/ovnkube-controller/0.log" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.054544 4687 generic.go:334] "Generic (PLEG): container finished" podID="d371271f-84c3-405c-b41f-604a06c1bb71" containerID="cd9fa26b1a492912a1cdf9c61faafa46bb665a4698338d5bae45164383977451" exitCode=1 Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.054610 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerDied","Data":"cd9fa26b1a492912a1cdf9c61faafa46bb665a4698338d5bae45164383977451"} Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.055392 4687 scope.go:117] "RemoveContainer" containerID="cd9fa26b1a492912a1cdf9c61faafa46bb665a4698338d5bae45164383977451" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.074551 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.099686 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.099722 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.099732 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.099747 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.099756 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:04Z","lastTransitionTime":"2025-11-25T09:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.126410 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9fa26b1a492912a1cdf9c61faafa46bb665a4698338d5bae45164383977451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd9fa26b1a492912a1cdf9c61faafa46bb665a4698338d5bae45164383977451\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"message\\\":\\\" 5948 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 09:04:02.917407 5948 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:04:02.917466 5948 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 09:04:02.917543 5948 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:04:02.917572 5948 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 09:04:02.917563 5948 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:04:02.917583 5948 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 09:04:02.917610 5948 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 09:04:02.917633 5948 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:04:02.917636 5948 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:04:02.917707 5948 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:04:02.917716 5948 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 09:04:02.917731 5948 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:04:02.917777 5948 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 09:04:02.917789 5948 factory.go:656] Stopping watch factory\\\\nI1125 09:04:02.917800 5948 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.144024 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containe
rID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeM
ounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.179964 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io
\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.196846 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.203082 4687 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.203132 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.203148 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.203170 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.203185 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:04Z","lastTransitionTime":"2025-11-25T09:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.218730 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\
\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.243106 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.266778 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.282852 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.299739 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.305742 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.305782 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.305792 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.305805 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.305814 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:04Z","lastTransitionTime":"2025-11-25T09:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.317855 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.338727 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.353896 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.366953 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.383145 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
,{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.407741 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.407792 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.407809 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.407829 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.407845 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:04Z","lastTransitionTime":"2025-11-25T09:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.511292 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-cscrb"] Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.512031 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.512075 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.512087 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.512103 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.512113 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:04Z","lastTransitionTime":"2025-11-25T09:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.512124 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:04 crc kubenswrapper[4687]: E1125 09:04:04.512216 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.531797 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.548779 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.549433 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs\") pod \"network-metrics-daemon-cscrb\" (UID: \"0433643a-5ed9-485b-a788-51de4a92f461\") " pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.549544 4687 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9htjw\" (UniqueName: \"kubernetes.io/projected/0433643a-5ed9-485b-a788-51de4a92f461-kube-api-access-9htjw\") pod \"network-metrics-daemon-cscrb\" (UID: \"0433643a-5ed9-485b-a788-51de4a92f461\") " pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.562675 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.580907 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.611120 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9fa26b1a492912a1cdf9c61faafa46bb665a4698338d5bae45164383977451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd9fa26b1a492912a1cdf9c61faafa46bb665a4698338d5bae45164383977451\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"message\\\":\\\" 5948 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 09:04:02.917407 5948 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:04:02.917466 5948 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 09:04:02.917543 5948 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:04:02.917572 5948 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 09:04:02.917563 5948 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:04:02.917583 5948 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 09:04:02.917610 5948 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 09:04:02.917633 5948 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:04:02.917636 5948 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:04:02.917707 5948 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:04:02.917716 5948 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 09:04:02.917731 5948 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:04:02.917777 5948 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 09:04:02.917789 5948 factory.go:656] Stopping watch factory\\\\nI1125 09:04:02.917800 5948 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.614306 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.614409 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.614475 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.614522 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.614540 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:04Z","lastTransitionTime":"2025-11-25T09:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.626671 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.643084 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.650758 4687 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-9htjw\" (UniqueName: \"kubernetes.io/projected/0433643a-5ed9-485b-a788-51de4a92f461-kube-api-access-9htjw\") pod \"network-metrics-daemon-cscrb\" (UID: \"0433643a-5ed9-485b-a788-51de4a92f461\") " pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.650890 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs\") pod \"network-metrics-daemon-cscrb\" (UID: \"0433643a-5ed9-485b-a788-51de4a92f461\") " pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:04 crc kubenswrapper[4687]: E1125 09:04:04.651119 4687 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:04:04 crc kubenswrapper[4687]: E1125 09:04:04.651236 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs podName:0433643a-5ed9-485b-a788-51de4a92f461 nodeName:}" failed. No retries permitted until 2025-11-25 09:04:05.151208753 +0000 UTC m=+40.204848501 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs") pod "network-metrics-daemon-cscrb" (UID: "0433643a-5ed9-485b-a788-51de4a92f461") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.660769 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.678061 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9htjw\" (UniqueName: \"kubernetes.io/projected/0433643a-5ed9-485b-a788-51de4a92f461-kube-api-access-9htjw\") pod \"network-metrics-daemon-cscrb\" (UID: \"0433643a-5ed9-485b-a788-51de4a92f461\") " 
pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.679115 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.692812 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.704587 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.717025 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.717053 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.717062 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.717075 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.717083 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:04Z","lastTransitionTime":"2025-11-25T09:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.720389 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.734722 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.734815 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:04 crc kubenswrapper[4687]: E1125 09:04:04.734844 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:04 crc kubenswrapper[4687]: E1125 09:04:04.735000 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.735116 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.735388 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:04 crc kubenswrapper[4687]: E1125 09:04:04.735777 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.754875 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.772037 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.789288 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.819883 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.819949 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.819968 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.819993 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.820014 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:04Z","lastTransitionTime":"2025-11-25T09:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.922754 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.923825 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.923987 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.924207 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:04 crc kubenswrapper[4687]: I1125 09:04:04.924360 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:04Z","lastTransitionTime":"2025-11-25T09:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.026654 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.026961 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.027148 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.027296 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.027467 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:05Z","lastTransitionTime":"2025-11-25T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.061775 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-p68hx_d371271f-84c3-405c-b41f-604a06c1bb71/ovnkube-controller/0.log" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.065903 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerStarted","Data":"9b6313e79f034beb6886ab82b8a0b95a8669c8c82010cadd765a57cbc8b62122"} Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.066924 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.087626 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.105797 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.129683 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.129756 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.129796 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.129827 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.129849 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:05Z","lastTransitionTime":"2025-11-25T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.137847 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9fa26b1a492912a1cdf9c61faafa46bb665a4698338d5bae45164383977451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd9fa26b1a492912a1cdf9c61faafa46bb665a4698338d5bae45164383977451\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"message\\\":\\\" 5948 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 09:04:02.917407 5948 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:04:02.917466 5948 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 09:04:02.917543 5948 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:04:02.917572 5948 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 09:04:02.917563 5948 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:04:02.917583 5948 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 09:04:02.917610 5948 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 09:04:02.917633 5948 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:04:02.917636 5948 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:04:02.917707 5948 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:04:02.917716 5948 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 09:04:02.917731 5948 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:04:02.917777 5948 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 09:04:02.917789 5948 factory.go:656] Stopping watch factory\\\\nI1125 09:04:02.917800 5948 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.154971 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\"
:\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.156245 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs\") pod \"network-metrics-daemon-cscrb\" (UID: \"0433643a-5ed9-485b-a788-51de4a92f461\") " pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:05 crc kubenswrapper[4687]: E1125 09:04:05.156364 4687 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:04:05 crc kubenswrapper[4687]: E1125 09:04:05.156437 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs podName:0433643a-5ed9-485b-a788-51de4a92f461 nodeName:}" failed. No retries permitted until 2025-11-25 09:04:06.156422446 +0000 UTC m=+41.210062164 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs") pod "network-metrics-daemon-cscrb" (UID: "0433643a-5ed9-485b-a788-51de4a92f461") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.178315 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/
var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29
\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.192064 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.211451 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.224682 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.231959 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.231990 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.231999 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.232013 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.232023 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:05Z","lastTransitionTime":"2025-11-25T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.236590 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.247740 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.261605 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.274550 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.286659 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.308867 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.324937 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.333786 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.333851 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.333871 4687 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.333893 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.333905 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:05Z","lastTransitionTime":"2025-11-25T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.340675 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d8fb345623a1675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-conf
ig\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.353668 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\
\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.368175 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":
{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\
\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/
bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.381985 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\
"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.395054 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee122
0d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.405461 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.415640 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.428884 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.436010 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.436048 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.436060 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.436076 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.436088 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:05Z","lastTransitionTime":"2025-11-25T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.443162 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.454686 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.468222 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.492226 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner 
reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.507260 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.518937 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d8fb345623a1675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 
09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.532892 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.537855 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.537894 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.537904 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.537921 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.537934 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:05Z","lastTransitionTime":"2025-11-25T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.553793 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b6313e79f034beb6886ab82b8a0b95a8669c8c82010cadd765a57cbc8b62122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd9fa26b1a492912a1cdf9c61faafa46bb665a4698338d5bae45164383977451\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"message\\\":\\\" 5948 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 09:04:02.917407 5948 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:04:02.917466 5948 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 09:04:02.917543 5948 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:04:02.917572 5948 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 09:04:02.917563 5948 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:04:02.917583 5948 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 09:04:02.917610 5948 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 09:04:02.917633 5948 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:04:02.917636 5948 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:04:02.917707 5948 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:04:02.917716 5948 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 09:04:02.917731 5948 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:04:02.917777 5948 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 09:04:02.917789 5948 factory.go:656] Stopping watch factory\\\\nI1125 09:04:02.917800 5948 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:58Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.568213 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.640523 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.640585 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.640597 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.640613 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.640622 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:05Z","lastTransitionTime":"2025-11-25T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.736791 4687 scope.go:117] "RemoveContainer" containerID="9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.745390 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.745454 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.745473 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.745531 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.745552 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:05Z","lastTransitionTime":"2025-11-25T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.762629 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b6313e79f034beb6886ab82b8a0b95a8669c8c8
2010cadd765a57cbc8b62122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd9fa26b1a492912a1cdf9c61faafa46bb665a4698338d5bae45164383977451\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"message\\\":\\\" 5948 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 09:04:02.917407 5948 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:04:02.917466 5948 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 09:04:02.917543 5948 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:04:02.917572 5948 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 09:04:02.917563 5948 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:04:02.917583 5948 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 09:04:02.917610 5948 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 09:04:02.917633 5948 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:04:02.917636 5948 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:04:02.917707 5948 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:04:02.917716 5948 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 09:04:02.917731 5948 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:04:02.917777 5948 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 09:04:02.917789 5948 factory.go:656] Stopping watch factory\\\\nI1125 09:04:02.917800 5948 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:58Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.780474 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.805754 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.822471 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.842928 4687 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.848695 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.848750 4687 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.848773 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.848801 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.848823 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:05Z","lastTransitionTime":"2025-11-25T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.860816 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/k
ubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.876481 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.891119 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.905031 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.915913 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.929480 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.942009 4687 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc
82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.954728 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.954996 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.955017 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.955026 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.955042 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.955050 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:05Z","lastTransitionTime":"2025-11-25T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.965554 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.978173 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:05 crc kubenswrapper[4687]: I1125 09:04:05.987215 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d8fb345623a1675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 25 
09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.057725 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.057775 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.057786 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.057811 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.057823 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:06Z","lastTransitionTime":"2025-11-25T09:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.070171 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.073011 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8"} Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.074988 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-p68hx_d371271f-84c3-405c-b41f-604a06c1bb71/ovnkube-controller/1.log" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.075646 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-p68hx_d371271f-84c3-405c-b41f-604a06c1bb71/ovnkube-controller/0.log" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.079541 4687 generic.go:334] "Generic (PLEG): container finished" podID="d371271f-84c3-405c-b41f-604a06c1bb71" containerID="9b6313e79f034beb6886ab82b8a0b95a8669c8c82010cadd765a57cbc8b62122" exitCode=1 Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.079584 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerDied","Data":"9b6313e79f034beb6886ab82b8a0b95a8669c8c82010cadd765a57cbc8b62122"} Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.079619 4687 scope.go:117] "RemoveContainer" containerID="cd9fa26b1a492912a1cdf9c61faafa46bb665a4698338d5bae45164383977451" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.081320 4687 scope.go:117] "RemoveContainer" containerID="9b6313e79f034beb6886ab82b8a0b95a8669c8c82010cadd765a57cbc8b62122" Nov 25 09:04:06 crc kubenswrapper[4687]: E1125 09:04:06.081620 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-p68hx_openshift-ovn-kubernetes(d371271f-84c3-405c-b41f-604a06c1bb71)\"" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" 
podUID="d371271f-84c3-405c-b41f-604a06c1bb71" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.099432 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.116798 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d8fb345623a1675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:06Z is after 2025-08-24T17:21:41Z" 
Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.132196 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"s
tate\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.148771 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.159672 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.159716 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.159729 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.159750 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.159765 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:06Z","lastTransitionTime":"2025-11-25T09:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.167453 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs\") pod \"network-metrics-daemon-cscrb\" (UID: \"0433643a-5ed9-485b-a788-51de4a92f461\") " pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:06 crc kubenswrapper[4687]: E1125 09:04:06.167592 4687 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:04:06 crc kubenswrapper[4687]: E1125 09:04:06.167641 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs podName:0433643a-5ed9-485b-a788-51de4a92f461 nodeName:}" failed. No retries permitted until 2025-11-25 09:04:08.167628392 +0000 UTC m=+43.221268110 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs") pod "network-metrics-daemon-cscrb" (UID: "0433643a-5ed9-485b-a788-51de4a92f461") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.179588 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b6313e79f034beb6886ab82b8a0b95a8669c8c8
2010cadd765a57cbc8b62122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd9fa26b1a492912a1cdf9c61faafa46bb665a4698338d5bae45164383977451\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"message\\\":\\\" 5948 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 09:04:02.917407 5948 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:04:02.917466 5948 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 09:04:02.917543 5948 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:04:02.917572 5948 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 09:04:02.917563 5948 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:04:02.917583 5948 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 09:04:02.917610 5948 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 09:04:02.917633 5948 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:04:02.917636 5948 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:04:02.917707 5948 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:04:02.917716 5948 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 09:04:02.917731 5948 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:04:02.917777 5948 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 09:04:02.917789 5948 factory.go:656] Stopping watch factory\\\\nI1125 09:04:02.917800 5948 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:58Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b6313e79f034beb6886ab82b8a0b95a8669c8c82010cadd765a57cbc8b62122\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"message\\\":\\\"P event handler 8\\\\nI1125 09:04:05.652343 6178 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:04:05.652418 6178 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.652661 6178 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.652984 6178 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.653235 6178 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.653484 6178 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:04:05.653536 6178 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 09:04:05.653545 6178 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 09:04:05.653594 6178 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:04:05.653632 6178 factory.go:656] Stopping watch 
factory\\\\nI1125 09:04:05.653642 6178 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 09:04:05.653669 6178 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o:
//134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.192528 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.210412 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.223855 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.237288 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.249903 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"las
tState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.260657 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.271681 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.271719 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.271728 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.271743 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.271757 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:06Z","lastTransitionTime":"2025-11-25T09:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.277471 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.292034 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.307944 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.321206 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.334734 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.374721 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.374754 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.374764 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.374780 4687 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.374791 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:06Z","lastTransitionTime":"2025-11-25T09:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.477353 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.477418 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.477455 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.477487 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.477546 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:06Z","lastTransitionTime":"2025-11-25T09:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.580718 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.580778 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.580797 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.580821 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.580840 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:06Z","lastTransitionTime":"2025-11-25T09:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.683916 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.683990 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.684007 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.684026 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.684041 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:06Z","lastTransitionTime":"2025-11-25T09:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.734661 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.734711 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.734761 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.734720 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:06 crc kubenswrapper[4687]: E1125 09:04:06.734842 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:06 crc kubenswrapper[4687]: E1125 09:04:06.734986 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:06 crc kubenswrapper[4687]: E1125 09:04:06.735068 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:04:06 crc kubenswrapper[4687]: E1125 09:04:06.735125 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.786718 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.786790 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.786829 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.786865 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.786886 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:06Z","lastTransitionTime":"2025-11-25T09:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.889263 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.889320 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.889342 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.889369 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.889393 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:06Z","lastTransitionTime":"2025-11-25T09:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.991740 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.991769 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.991778 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.991797 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:06 crc kubenswrapper[4687]: I1125 09:04:06.991808 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:06Z","lastTransitionTime":"2025-11-25T09:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.085417 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-p68hx_d371271f-84c3-405c-b41f-604a06c1bb71/ovnkube-controller/1.log" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.089552 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.090617 4687 scope.go:117] "RemoveContainer" containerID="9b6313e79f034beb6886ab82b8a0b95a8669c8c82010cadd765a57cbc8b62122" Nov 25 09:04:07 crc kubenswrapper[4687]: E1125 09:04:07.090923 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-p68hx_openshift-ovn-kubernetes(d371271f-84c3-405c-b41f-604a06c1bb71)\"" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.094097 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.094131 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.094143 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.094155 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.094165 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:07Z","lastTransitionTime":"2025-11-25T09:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.105062 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.117950 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.134490 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.152763 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.167575 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.188858 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.196541 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.196580 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.196592 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.196607 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.196619 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:07Z","lastTransitionTime":"2025-11-25T09:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.203946 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.216549 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.230685 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.240649 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d8fb345623a1675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 
09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.255165 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.298410 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.298451 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.298464 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.298484 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.298496 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:07Z","lastTransitionTime":"2025-11-25T09:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.307414 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b6313e79f034beb6886ab82b8a0b95a8669c8c82010cadd765a57cbc8b62122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd9fa26b1a492912a1cdf9c61faafa46bb665a4698338d5bae45164383977451\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"message\\\":\\\" 5948 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 09:04:02.917407 5948 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:04:02.917466 5948 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 09:04:02.917543 5948 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:04:02.917572 5948 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 09:04:02.917563 5948 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:04:02.917583 5948 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 09:04:02.917610 5948 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 09:04:02.917633 5948 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:04:02.917636 5948 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:04:02.917707 5948 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:04:02.917716 5948 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 09:04:02.917731 5948 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:04:02.917777 5948 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 09:04:02.917789 5948 factory.go:656] Stopping watch factory\\\\nI1125 09:04:02.917800 5948 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:58Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b6313e79f034beb6886ab82b8a0b95a8669c8c82010cadd765a57cbc8b62122\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"message\\\":\\\"P event handler 8\\\\nI1125 09:04:05.652343 6178 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:04:05.652418 6178 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.652661 6178 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.652984 6178 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.653235 6178 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.653484 6178 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:04:05.653536 6178 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 09:04:05.653545 6178 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 09:04:05.653594 6178 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:04:05.653632 6178 factory.go:656] Stopping watch factory\\\\nI1125 09:04:05.653642 6178 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 09:04:05.653669 6178 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.323951 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.334238 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.349036 4687 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.365109 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.380840 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.395146 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.400522 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.400550 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.400558 4687 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.400570 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.400579 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:07Z","lastTransitionTime":"2025-11-25T09:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.409457 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d8fb345623a1675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-conf
ig\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.420110 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.444330 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b6313e79f034beb6886ab82b8a0b95a8669c8c82010cadd765a57cbc8b62122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b6313e79f034beb6886ab82b8a0b95a8669c8c82010cadd765a57cbc8b62122\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"message\\\":\\\"P event handler 8\\\\nI1125 09:04:05.652343 6178 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:04:05.652418 6178 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.652661 6178 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.652984 6178 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.653235 6178 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.653484 6178 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:04:05.653536 6178 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 09:04:05.653545 6178 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 09:04:05.653594 6178 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:04:05.653632 6178 factory.go:656] Stopping watch factory\\\\nI1125 09:04:05.653642 6178 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 09:04:05.653669 6178 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-p68hx_openshift-ovn-kubernetes(d371271f-84c3-405c-b41f-604a06c1bb71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.457372 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.471141 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.483091 4687 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.494486 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.502342 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.502393 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.502410 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.502432 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.502449 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:07Z","lastTransitionTime":"2025-11-25T09:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.505634 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"h
ostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.520006 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256
:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.533919 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.547035 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.563882 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.578983 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.590643 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:07Z is after 2025-08-24T17:21:41Z"
Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.604612 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.604699 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.604718 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.604735 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:07 crc kubenswrapper[4687]: I1125 09:04:07.604751 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:07Z","lastTransitionTime":"2025-11-25T09:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[identical node-status event blocks (NodeHasSufficientMemory, NodeHasNoDiskPressure, NodeHasSufficientPID, NodeNotReady) and "Node became not ready" conditions repeat at ~100 ms intervals from 09:04:07.708406 through 09:04:08.122206]
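The recurring x509 failure above is self-describing: the network-node-identity webhook at https://127.0.0.1:9743 is serving a certificate whose notAfter (2025-08-24T17:21:41Z) is months behind the node clock, so every status patch the kubelet sends is rejected. A minimal Go sketch of how one might confirm this from the node, dialing the same endpoint from the log purely to read the serving certificate's validity window (illustrative only; the program and approach are assumptions, not tooling referenced by this log):

// cert_probe.go — sketch: inspect the webhook serving certificate's dates.
package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// InsecureSkipVerify is deliberate: the certificate is already known
	// to fail verification; we only want to read its validity window.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	certs := conn.ConnectionState().PeerCertificates
	if len(certs) == 0 {
		log.Fatal("no peer certificate presented")
	}
	cert := certs[0]
	fmt.Printf("subject:   %s\n", cert.Subject)
	fmt.Printf("notBefore: %s\n", cert.NotBefore.Format(time.RFC3339))
	fmt.Printf("notAfter:  %s\n", cert.NotAfter.Format(time.RFC3339))
	if time.Now().After(cert.NotAfter) {
		fmt.Println("certificate has expired (matches the x509 error in the log)")
	}
}

Until that certificate is rotated, the "Failed to update status for pod" entries above will keep repeating with the same error.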
Nov 25 09:04:08 crc kubenswrapper[4687]: I1125 09:04:08.188449 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs\") pod \"network-metrics-daemon-cscrb\" (UID: \"0433643a-5ed9-485b-a788-51de4a92f461\") " pod="openshift-multus/network-metrics-daemon-cscrb"
Nov 25 09:04:08 crc kubenswrapper[4687]: E1125 09:04:08.188689 4687 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 09:04:08 crc kubenswrapper[4687]: E1125 09:04:08.188802 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs podName:0433643a-5ed9-485b-a788-51de4a92f461 nodeName:}" failed. No retries permitted until 2025-11-25 09:04:12.188774929 +0000 UTC m=+47.242414687 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs") pod "network-metrics-daemon-cscrb" (UID: "0433643a-5ed9-485b-a788-51de4a92f461") : object "openshift-multus"/"metrics-daemon-secret" not registered
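The nestedpendingoperations entry above shows the volume manager's retry backoff: the failed MountVolume.SetUp for metrics-certs may not be retried for 4s (until 09:04:12), and repeated failures stretch that delay further. A simplified sketch of such a doubling-delay pattern (an illustration of the pattern only; the constants and code are assumptions, not the kubelet's implementation):

// backoff_sketch.go — sketch: exponential retry delay like the
// durationBeforeRetry seen in the log above.
package main

import (
	"fmt"
	"time"
)

const (
	initialBackoff = 500 * time.Millisecond // hypothetical starting delay
	maxBackoff     = 2 * time.Minute        // hypothetical cap
)

func main() {
	delay := initialBackoff
	for attempt := 1; attempt <= 8; attempt++ {
		// Each failed attempt doubles the wait before the next retry,
		// up to the cap; by the fourth failure the delay reaches 4s.
		fmt.Printf("attempt %d failed; no retries permitted for %s\n", attempt, delay)
		delay *= 2
		if delay > maxBackoff {
			delay = maxBackoff
		}
	}
}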
[the node-status event blocks and "Node became not ready" conditions repeat at ~100 ms intervals from 09:04:08.225723 through 09:04:08.639079]
Nov 25 09:04:08 crc kubenswrapper[4687]: I1125 09:04:08.733780 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:04:08 crc kubenswrapper[4687]: I1125 09:04:08.733853 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb"
Nov 25 09:04:08 crc kubenswrapper[4687]: I1125 09:04:08.733886 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:04:08 crc kubenswrapper[4687]: I1125 09:04:08.733814 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:04:08 crc kubenswrapper[4687]: E1125 09:04:08.734000 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:04:08 crc kubenswrapper[4687]: E1125 09:04:08.734063 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461"
Nov 25 09:04:08 crc kubenswrapper[4687]: E1125 09:04:08.734166 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:04:08 crc kubenswrapper[4687]: E1125 09:04:08.734220 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
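Every "Error syncing pod" entry above traces back to the same NetworkReady=false condition: no CNI configuration file in /etc/kubernetes/cni/net.d/, so no pod sandbox can be created. A small Go sketch of checking that directory the way one might verify the complaint by hand (the accepted extensions follow common CNI convention and are an assumption, not the kubelet's exact logic):

// cni_check.go — sketch: does the directory named in the log contain
// any CNI network configuration?
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir := "/etc/kubernetes/cni/net.d/" // path taken from the log message
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Printf("cannot read %s: %v\n", dir, err)
		return
	}
	found := false
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // common CNI config extensions (assumption)
			fmt.Printf("found CNI config: %s\n", filepath.Join(dir, e.Name()))
			found = true
		}
	}
	if !found {
		fmt.Println("no CNI configuration file found; the node will stay NotReady")
	}
}

On this node the directory is evidently empty, which is why the Ready condition stays False until the network provider (here, multus/OVN components) writes its configuration.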
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:08 crc kubenswrapper[4687]: I1125 09:04:08.741420 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:08 crc kubenswrapper[4687]: I1125 09:04:08.741491 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:08 crc kubenswrapper[4687]: I1125 09:04:08.741555 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:08 crc kubenswrapper[4687]: I1125 09:04:08.741586 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:08 crc kubenswrapper[4687]: I1125 09:04:08.741608 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:08Z","lastTransitionTime":"2025-11-25T09:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:08 crc kubenswrapper[4687]: I1125 09:04:08.844818 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:08 crc kubenswrapper[4687]: I1125 09:04:08.844915 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:08 crc kubenswrapper[4687]: I1125 09:04:08.844957 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:08 crc kubenswrapper[4687]: I1125 09:04:08.844982 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:08 crc kubenswrapper[4687]: I1125 09:04:08.845002 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:08Z","lastTransitionTime":"2025-11-25T09:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:08 crc kubenswrapper[4687]: I1125 09:04:08.947620 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:08 crc kubenswrapper[4687]: I1125 09:04:08.947659 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:08 crc kubenswrapper[4687]: I1125 09:04:08.947667 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:08 crc kubenswrapper[4687]: I1125 09:04:08.947688 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:08 crc kubenswrapper[4687]: I1125 09:04:08.947697 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:08Z","lastTransitionTime":"2025-11-25T09:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.050090 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.050172 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.050194 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.050220 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.050236 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:09Z","lastTransitionTime":"2025-11-25T09:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.153213 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.153265 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.153276 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.153292 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.153303 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:09Z","lastTransitionTime":"2025-11-25T09:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.255733 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.255773 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.255783 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.255809 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.255837 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:09Z","lastTransitionTime":"2025-11-25T09:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.359022 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.359099 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.359122 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.359146 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.359163 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:09Z","lastTransitionTime":"2025-11-25T09:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.461552 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.461613 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.461630 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.461655 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.461673 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:09Z","lastTransitionTime":"2025-11-25T09:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.568957 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.569489 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.570019 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.570093 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.570121 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:09Z","lastTransitionTime":"2025-11-25T09:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.673130 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.673197 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.673250 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.673275 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.673289 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:09Z","lastTransitionTime":"2025-11-25T09:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.776301 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.776372 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.776388 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.776413 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.776433 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:09Z","lastTransitionTime":"2025-11-25T09:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.880280 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.880341 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.880351 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.880371 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.880388 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:09Z","lastTransitionTime":"2025-11-25T09:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.982973 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.983031 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.983044 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.983066 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:09 crc kubenswrapper[4687]: I1125 09:04:09.983083 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:09Z","lastTransitionTime":"2025-11-25T09:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.086561 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.086682 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.086702 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.086732 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.086750 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:10Z","lastTransitionTime":"2025-11-25T09:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.191429 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.191557 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.191581 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.191665 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.191684 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:10Z","lastTransitionTime":"2025-11-25T09:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.294930 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.295010 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.295029 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.295055 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.295082 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:10Z","lastTransitionTime":"2025-11-25T09:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.398820 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.398899 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.398924 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.398957 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.398979 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:10Z","lastTransitionTime":"2025-11-25T09:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.502440 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.502545 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.502559 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.502583 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.502598 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:10Z","lastTransitionTime":"2025-11-25T09:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.608218 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.608278 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.608295 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.608319 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.608336 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:10Z","lastTransitionTime":"2025-11-25T09:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.711168 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.711244 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.711267 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.711305 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.711326 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:10Z","lastTransitionTime":"2025-11-25T09:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.733956 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.733987 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.734032 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:10 crc kubenswrapper[4687]: E1125 09:04:10.734081 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.734098 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:10 crc kubenswrapper[4687]: E1125 09:04:10.734265 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:04:10 crc kubenswrapper[4687]: E1125 09:04:10.734368 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:10 crc kubenswrapper[4687]: E1125 09:04:10.734523 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.813900 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.813958 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.813978 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.814003 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.814023 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:10Z","lastTransitionTime":"2025-11-25T09:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.917832 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.917900 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.917917 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.917946 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:10 crc kubenswrapper[4687]: I1125 09:04:10.917963 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:10Z","lastTransitionTime":"2025-11-25T09:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.021769 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.021851 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.021869 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.021900 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.021918 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:11Z","lastTransitionTime":"2025-11-25T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.124335 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.124404 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.124426 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.124456 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.124478 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:11Z","lastTransitionTime":"2025-11-25T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.227435 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.227554 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.227595 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.227627 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.227649 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:11Z","lastTransitionTime":"2025-11-25T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.330259 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.330311 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.330323 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.330341 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.330356 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:11Z","lastTransitionTime":"2025-11-25T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.432698 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.432774 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.432797 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.432825 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.432843 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:11Z","lastTransitionTime":"2025-11-25T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.535896 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.535935 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.535943 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.535961 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.535972 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:11Z","lastTransitionTime":"2025-11-25T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.620663 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.620750 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.620769 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.620794 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.620813 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:11Z","lastTransitionTime":"2025-11-25T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:11 crc kubenswrapper[4687]: E1125 09:04:11.641936 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.647703 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.647782 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.647805 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.647838 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.647860 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:11Z","lastTransitionTime":"2025-11-25T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:11 crc kubenswrapper[4687]: E1125 09:04:11.664408 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.671601 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.671672 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.671699 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.671749 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.671774 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:11Z","lastTransitionTime":"2025-11-25T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:11 crc kubenswrapper[4687]: E1125 09:04:11.689108 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.693724 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.693757 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.693768 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.693783 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.693794 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:11Z","lastTransitionTime":"2025-11-25T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:11 crc kubenswrapper[4687]: E1125 09:04:11.709368 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.712482 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.712555 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.712566 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.712581 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.712591 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:11Z","lastTransitionTime":"2025-11-25T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:11 crc kubenswrapper[4687]: E1125 09:04:11.724558 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:11 crc kubenswrapper[4687]: E1125 09:04:11.724733 4687 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.726260 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.726298 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.726309 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.726326 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.726338 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:11Z","lastTransitionTime":"2025-11-25T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.829289 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.829341 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.829357 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.829379 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.829393 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:11Z","lastTransitionTime":"2025-11-25T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.932667 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.932715 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.932727 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.932745 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:11 crc kubenswrapper[4687]: I1125 09:04:11.932759 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:11Z","lastTransitionTime":"2025-11-25T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.035474 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.035555 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.035572 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.035596 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.035665 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:12Z","lastTransitionTime":"2025-11-25T09:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.138940 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.139002 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.139014 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.139045 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.139060 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:12Z","lastTransitionTime":"2025-11-25T09:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.235041 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs\") pod \"network-metrics-daemon-cscrb\" (UID: \"0433643a-5ed9-485b-a788-51de4a92f461\") " pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:12 crc kubenswrapper[4687]: E1125 09:04:12.235280 4687 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:04:12 crc kubenswrapper[4687]: E1125 09:04:12.235405 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs podName:0433643a-5ed9-485b-a788-51de4a92f461 nodeName:}" failed. No retries permitted until 2025-11-25 09:04:20.23537667 +0000 UTC m=+55.289016518 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs") pod "network-metrics-daemon-cscrb" (UID: "0433643a-5ed9-485b-a788-51de4a92f461") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.241871 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.241932 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.241956 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.241973 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.241985 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:12Z","lastTransitionTime":"2025-11-25T09:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.344320 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.344373 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.344386 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.344406 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.344419 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:12Z","lastTransitionTime":"2025-11-25T09:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.447468 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.447524 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.447536 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.447554 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.447566 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:12Z","lastTransitionTime":"2025-11-25T09:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.550760 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.550795 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.550804 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.550818 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.550828 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:12Z","lastTransitionTime":"2025-11-25T09:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.652855 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.652889 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.652900 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.652915 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.652927 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:12Z","lastTransitionTime":"2025-11-25T09:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.734238 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:12 crc kubenswrapper[4687]: E1125 09:04:12.734361 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.734459 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:12 crc kubenswrapper[4687]: E1125 09:04:12.734621 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.734479 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.734980 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:12 crc kubenswrapper[4687]: E1125 09:04:12.736067 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:12 crc kubenswrapper[4687]: E1125 09:04:12.736188 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.755253 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.755305 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.755324 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.755353 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.755374 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:12Z","lastTransitionTime":"2025-11-25T09:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.858419 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.858472 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.858488 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.858555 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.858595 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:12Z","lastTransitionTime":"2025-11-25T09:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.961050 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.961085 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.961101 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.961116 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:12 crc kubenswrapper[4687]: I1125 09:04:12.961125 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:12Z","lastTransitionTime":"2025-11-25T09:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.066578 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.066654 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.066678 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.066708 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.066730 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:13Z","lastTransitionTime":"2025-11-25T09:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.169374 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.169435 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.169453 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.169474 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.169493 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:13Z","lastTransitionTime":"2025-11-25T09:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.271275 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.271299 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.271307 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.271320 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.271329 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:13Z","lastTransitionTime":"2025-11-25T09:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.375077 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.375142 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.375160 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.375185 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.375201 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:13Z","lastTransitionTime":"2025-11-25T09:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.479051 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.479108 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.479127 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.479152 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.479170 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:13Z","lastTransitionTime":"2025-11-25T09:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.583195 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.583257 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.583278 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.583309 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.583332 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:13Z","lastTransitionTime":"2025-11-25T09:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.686634 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.686697 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.686714 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.686736 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.686753 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:13Z","lastTransitionTime":"2025-11-25T09:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.789367 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.789393 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.789402 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.789413 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.789422 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:13Z","lastTransitionTime":"2025-11-25T09:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.891820 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.891850 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.891863 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.891879 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.891891 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:13Z","lastTransitionTime":"2025-11-25T09:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.994553 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.994652 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.994675 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.994701 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:13 crc kubenswrapper[4687]: I1125 09:04:13.994728 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:13Z","lastTransitionTime":"2025-11-25T09:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.098112 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.098151 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.098159 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.098173 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.098183 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:14Z","lastTransitionTime":"2025-11-25T09:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.201397 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.201464 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.201488 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.201600 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.201625 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:14Z","lastTransitionTime":"2025-11-25T09:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.304232 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.304266 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.304275 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.304287 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.304296 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:14Z","lastTransitionTime":"2025-11-25T09:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.406524 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.406564 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.406576 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.406613 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.406623 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:14Z","lastTransitionTime":"2025-11-25T09:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.509891 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.509940 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.509956 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.509978 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.509995 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:14Z","lastTransitionTime":"2025-11-25T09:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.613081 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.613145 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.613167 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.613201 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.613227 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:14Z","lastTransitionTime":"2025-11-25T09:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.716417 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.716460 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.716472 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.716488 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.716528 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:14Z","lastTransitionTime":"2025-11-25T09:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.734066 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.734142 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.734142 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.734181 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:14 crc kubenswrapper[4687]: E1125 09:04:14.734263 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:04:14 crc kubenswrapper[4687]: E1125 09:04:14.734451 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:14 crc kubenswrapper[4687]: E1125 09:04:14.734666 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:14 crc kubenswrapper[4687]: E1125 09:04:14.734715 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.819913 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.819978 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.820003 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.820033 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.820057 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:14Z","lastTransitionTime":"2025-11-25T09:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.922964 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.923010 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.923021 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.923036 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:14 crc kubenswrapper[4687]: I1125 09:04:14.923052 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:14Z","lastTransitionTime":"2025-11-25T09:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.026474 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.026537 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.026547 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.026562 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.026572 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:15Z","lastTransitionTime":"2025-11-25T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.128851 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.128915 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.128927 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.128948 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.128993 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:15Z","lastTransitionTime":"2025-11-25T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.231660 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.231808 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.231824 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.231840 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.231851 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:15Z","lastTransitionTime":"2025-11-25T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.334391 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.334462 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.334486 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.334549 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.334569 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:15Z","lastTransitionTime":"2025-11-25T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.438175 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.438207 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.438215 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.438229 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.438238 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:15Z","lastTransitionTime":"2025-11-25T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.540327 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.540359 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.540370 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.540386 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.540397 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:15Z","lastTransitionTime":"2025-11-25T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.643700 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.643778 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.643802 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.643831 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.643853 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:15Z","lastTransitionTime":"2025-11-25T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.746415 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.746461 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.746472 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.746487 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.746519 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:15Z","lastTransitionTime":"2025-11-25T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.747634 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.762029 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.777696 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f
2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\
\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.791821 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.812369 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.824821 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.837829 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.848190 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.848236 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.848248 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.848266 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.848282 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:15Z","lastTransitionTime":"2025-11-25T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.853437 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.865903 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.882090 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.892710 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d8fb345623a1
675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.902994 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.913215 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.926669 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.951304 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.951345 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.951361 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.951378 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.951391 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:15Z","lastTransitionTime":"2025-11-25T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.954386 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b6313e79f034beb6886ab82b8a0b95a8669c8c82010cadd765a57cbc8b62122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b6313e79f034beb6886ab82b8a0b95a8669c8c82010cadd765a57cbc8b62122\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"message\\\":\\\"P event handler 8\\\\nI1125 09:04:05.652343 6178 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:04:05.652418 6178 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.652661 6178 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.652984 6178 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.653235 6178 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.653484 6178 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:04:05.653536 6178 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 09:04:05.653545 6178 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 09:04:05.653594 6178 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:04:05.653632 6178 factory.go:656] Stopping watch factory\\\\nI1125 09:04:05.653642 6178 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 09:04:05.653669 6178 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-p68hx_openshift-ovn-kubernetes(d371271f-84c3-405c-b41f-604a06c1bb71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:15 crc kubenswrapper[4687]: I1125 09:04:15.969387 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.060238 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.060279 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.060291 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.060308 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.060322 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:16Z","lastTransitionTime":"2025-11-25T09:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.163132 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.163184 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.163196 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.163212 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.163222 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:16Z","lastTransitionTime":"2025-11-25T09:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.266083 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.266630 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.266655 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.266686 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.266708 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:16Z","lastTransitionTime":"2025-11-25T09:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.369918 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.370022 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.370051 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.370081 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.370101 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:16Z","lastTransitionTime":"2025-11-25T09:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.472269 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.472318 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.472356 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.472380 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.472396 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:16Z","lastTransitionTime":"2025-11-25T09:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.485832 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.485992 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:16 crc kubenswrapper[4687]: E1125 09:04:16.486026 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:04:48.486001288 +0000 UTC m=+83.539641066 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:04:16 crc kubenswrapper[4687]: E1125 09:04:16.486105 4687 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:04:16 crc kubenswrapper[4687]: E1125 09:04:16.486183 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:04:48.486155633 +0000 UTC m=+83.539795401 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.586936 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.587003 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.587031 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:16 crc kubenswrapper[4687]: E1125 09:04:16.587122 4687 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:04:16 crc kubenswrapper[4687]: E1125 09:04:16.587135 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:04:16 crc kubenswrapper[4687]: E1125 09:04:16.587151 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:04:16 crc kubenswrapper[4687]: E1125 09:04:16.587165 4687 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:04:16 crc kubenswrapper[4687]: E1125 09:04:16.587180 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:04:48.58716483 +0000 UTC m=+83.640804548 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:04:16 crc kubenswrapper[4687]: E1125 09:04:16.587195 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:04:48.587188571 +0000 UTC m=+83.640828279 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:04:16 crc kubenswrapper[4687]: E1125 09:04:16.587220 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:04:16 crc kubenswrapper[4687]: E1125 09:04:16.587229 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:04:16 crc kubenswrapper[4687]: E1125 09:04:16.587235 4687 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:04:16 crc kubenswrapper[4687]: E1125 09:04:16.587255 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:04:48.587247312 +0000 UTC m=+83.640887030 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.594321 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.594348 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.594357 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.594370 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.594379 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:16Z","lastTransitionTime":"2025-11-25T09:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.734369 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb"
Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.734636 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.734713 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:04:16 crc kubenswrapper[4687]: E1125 09:04:16.735155 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:04:16 crc kubenswrapper[4687]: I1125 09:04:16.734761 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:04:16 crc kubenswrapper[4687]: E1125 09:04:16.735301 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:04:16 crc kubenswrapper[4687]: E1125 09:04:16.735369 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:04:16 crc kubenswrapper[4687]: E1125 09:04:16.735037 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461"
Nov 25 09:04:18 crc kubenswrapper[4687]: I1125 09:04:18.734419 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb"
Nov 25 09:04:18 crc kubenswrapper[4687]: I1125 09:04:18.734459 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:04:18 crc kubenswrapper[4687]: I1125 09:04:18.734531 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:04:18 crc kubenswrapper[4687]: I1125 09:04:18.734630 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:04:18 crc kubenswrapper[4687]: E1125 09:04:18.734631 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461"
Nov 25 09:04:18 crc kubenswrapper[4687]: E1125 09:04:18.734695 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:04:18 crc kubenswrapper[4687]: E1125 09:04:18.734889 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:04:18 crc kubenswrapper[4687]: E1125 09:04:18.734936 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:04:18 crc kubenswrapper[4687]: I1125 09:04:18.735689 4687 scope.go:117] "RemoveContainer" containerID="9b6313e79f034beb6886ab82b8a0b95a8669c8c82010cadd765a57cbc8b62122"
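Every NetworkPluginNotReady record above reduces to the same failing check: nothing in /etc/kubernetes/cni/net.d/ parses as a CNI network configuration, so the kubelet keeps reporting NetworkReady=false and refuses to sync pods that need a sandbox. Below is a minimal standalone Go sketch of an equivalent probe; the directory path comes straight from the log message, while the extension filter follows common CNI config-loader conventions and is an assumption, not this cluster's exact logic (the real check lives in the container runtime's CNI plugin manager).

// cnicheck.go: report whether a CNI network config is present, mirroring
// the readiness condition repeated in the kubelet log above. Illustrative
// sketch only; extensions checked are an assumption.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Directory named in the log message "no CNI configuration file in ...".
	confDir := "/etc/kubernetes/cni/net.d"

	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Printf("NetworkReady=false: cannot read %s: %v\n", confDir, err)
		os.Exit(1)
	}
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // extensions CNI loaders commonly accept
			fmt.Printf("NetworkReady=true: found %s\n", filepath.Join(confDir, e.Name()))
			return
		}
	}
	fmt.Println("NetworkReady=false: no CNI configuration file; has your network provider started?")
	os.Exit(1)
}

On this node the provider is ovn-kubernetes (chained behind multus), per the surrounding records, so the condition should clear on its own once the ovnkube-node pod writes its config into that directory rather than by hand-placing a file.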
Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.139253 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-p68hx_d371271f-84c3-405c-b41f-604a06c1bb71/ovnkube-controller/1.log"
Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.143000 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerStarted","Data":"8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed"}
Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.143722 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx"
Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.166852 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z"
Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.193277 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.217722 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 
2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.235524 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.260722 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.276288 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.276348 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.276366 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.276392 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.276411 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:19Z","lastTransitionTime":"2025-11-25T09:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.287334 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.306469 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.319574 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.332714 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.348871 4687 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc
82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.362320 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":t
rue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d8fb345623a1675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.377469 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.379105 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.379155 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.379167 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.379185 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.379199 4687 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:19Z","lastTransitionTime":"2025-11-25T09:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.390357 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.401521 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.417897 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b6313e79f034beb6886ab82b8a0b95a8669c8c82010cadd765a57cbc8b62122\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"message\\\":\\\"P event handler 8\\\\nI1125 09:04:05.652343 6178 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:04:05.652418 6178 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.652661 6178 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.652984 6178 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.653235 6178 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.653484 6178 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:04:05.653536 6178 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 09:04:05.653545 6178 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 09:04:05.653594 6178 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:04:05.653632 6178 factory.go:656] Stopping watch factory\\\\nI1125 09:04:05.653642 6178 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 09:04:05.653669 6178 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.427538 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.436824 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.447844 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.467184 4687 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.480883 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.480926 4687 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.480938 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.480953 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.480964 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:19Z","lastTransitionTime":"2025-11-25T09:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.481722 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/k
ubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.497951 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.511848 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.523448 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.533747 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.544776 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.555417 4687 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc
82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.566676 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.583489 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.583617 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.583643 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.583677 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.583700 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:19Z","lastTransitionTime":"2025-11-25T09:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.583691 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.596002 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.606470 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d8fb345623a1675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 
09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.623530 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c472
2f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b6313e79f034beb6886ab82b8a0b95a8669c8c82010cadd765a57cbc8b62122\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"message\\\":\\\"P event handler 8\\\\nI1125 09:04:05.652343 6178 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:04:05.652418 6178 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.652661 6178 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.652984 6178 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.653235 6178 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.653484 6178 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:04:05.653536 6178 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 09:04:05.653545 6178 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 09:04:05.653594 6178 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:04:05.653632 6178 factory.go:656] Stopping watch factory\\\\nI1125 09:04:05.653642 6178 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 09:04:05.653669 6178 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.633891 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.647454 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.686258 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.686299 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.686307 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.686321 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.686331 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:19Z","lastTransitionTime":"2025-11-25T09:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.788121    4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.788189    4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.788202    4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.788217    4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.788226    4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:19Z","lastTransitionTime":"2025-11-25T09:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.891222    4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.891269    4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.891280    4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.891296    4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.891308    4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:19Z","lastTransitionTime":"2025-11-25T09:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.994585    4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.994629    4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.994641    4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.994659    4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:19 crc kubenswrapper[4687]: I1125 09:04:19.994671    4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:19Z","lastTransitionTime":"2025-11-25T09:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.098215    4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.098249    4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.098259    4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.098275    4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.098288    4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:20Z","lastTransitionTime":"2025-11-25T09:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.147708    4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-p68hx_d371271f-84c3-405c-b41f-604a06c1bb71/ovnkube-controller/2.log"
Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.148399    4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-p68hx_d371271f-84c3-405c-b41f-604a06c1bb71/ovnkube-controller/1.log"
Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.151704    4687 generic.go:334] "Generic (PLEG): container finished" podID="d371271f-84c3-405c-b41f-604a06c1bb71" containerID="8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed" exitCode=1
Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.151742    4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerDied","Data":"8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed"}
Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.151775    4687 scope.go:117] "RemoveContainer" containerID="9b6313e79f034beb6886ab82b8a0b95a8669c8c82010cadd765a57cbc8b62122"
Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.152478    4687 scope.go:117] "RemoveContainer" containerID="8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed"
Nov 25 09:04:20 crc kubenswrapper[4687]: E1125 09:04:20.152667    4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-p68hx_openshift-ovn-kubernetes(d371271f-84c3-405c-b41f-604a06c1bb71)\"" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" podUID="d371271f-84c3-405c-b41f-604a06c1bb71"
Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.176813    4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.203277 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.203377 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.203460 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.203485 4687 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.203588    4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.203616    4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:20Z","lastTransitionTime":"2025-11-25T09:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.224346    4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d8fb345623a1675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-conf
ig\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.241120 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.243666 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs\") pod \"network-metrics-daemon-cscrb\" (UID: 
\"0433643a-5ed9-485b-a788-51de4a92f461\") " pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:20 crc kubenswrapper[4687]: E1125 09:04:20.243897 4687 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:04:20 crc kubenswrapper[4687]: E1125 09:04:20.245061 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs podName:0433643a-5ed9-485b-a788-51de4a92f461 nodeName:}" failed. No retries permitted until 2025-11-25 09:04:36.24501208 +0000 UTC m=+71.298651808 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs") pod "network-metrics-daemon-cscrb" (UID: "0433643a-5ed9-485b-a788-51de4a92f461") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.267833 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8abd6908316695147c15b437489d344863752499
515f1c18b0cd4072116701ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b6313e79f034beb6886ab82b8a0b95a8669c8c82010cadd765a57cbc8b62122\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"message\\\":\\\"P event handler 8\\\\nI1125 09:04:05.652343 6178 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:04:05.652418 6178 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.652661 6178 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.652984 6178 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.653235 6178 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:04:05.653484 6178 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:04:05.653536 6178 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 09:04:05.653545 6178 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 09:04:05.653594 6178 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:04:05.653632 6178 factory.go:656] Stopping watch factory\\\\nI1125 09:04:05.653642 6178 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 09:04:05.653669 6178 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"message\\\":\\\"b\\\\nI1125 09:04:19.660645 6361 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI1125 09:04:19.660678 6361 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nI1125 09:04:19.660680 6361 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI1125 09:04:19.660661 6361 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-config-operator/machine-config-daemon]} name:Service_openshift-machine-config-operator/machine-config-daemon_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.43:8798: 10.217.4.43:9001:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == 
{a36f6289-d09f-43f8-8a8a-c9d2cc11eb0d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 09:04:19.660694 6361 base_network_controller_pods.go:916] Annot\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:04:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\
\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.280540 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.292267 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.305832 4687 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.305880 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.305892 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.305910 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.305924 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:20Z","lastTransitionTime":"2025-11-25T09:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.318105 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.339063 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/k
ubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.353855 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\
\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.369305 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.389464 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.402837 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.408595 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.408648 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.408664 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.408684 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.408700 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:20Z","lastTransitionTime":"2025-11-25T09:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.420149 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.435591 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.451264 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.512448 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.512548 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.512566 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.512588 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.512604 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:20Z","lastTransitionTime":"2025-11-25T09:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.616292 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.616344 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.616355 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.616374 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.616386 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:20Z","lastTransitionTime":"2025-11-25T09:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.719182 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.719254 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.719275 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.719305 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.719328 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:20Z","lastTransitionTime":"2025-11-25T09:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.734045 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.734174 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.734063 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.734306 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:20 crc kubenswrapper[4687]: E1125 09:04:20.734294 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:04:20 crc kubenswrapper[4687]: E1125 09:04:20.734414 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:20 crc kubenswrapper[4687]: E1125 09:04:20.734666 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:20 crc kubenswrapper[4687]: E1125 09:04:20.734788 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.823065 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.823141 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.823168 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.823198 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.823216 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:20Z","lastTransitionTime":"2025-11-25T09:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.925854 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.925894 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.925904 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.925920 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:20 crc kubenswrapper[4687]: I1125 09:04:20.925930 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:20Z","lastTransitionTime":"2025-11-25T09:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.029212 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.029287 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.029322 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.029357 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.029380 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:21Z","lastTransitionTime":"2025-11-25T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.132398 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.132447 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.132465 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.132489 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.132538 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:21Z","lastTransitionTime":"2025-11-25T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.159256 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-p68hx_d371271f-84c3-405c-b41f-604a06c1bb71/ovnkube-controller/2.log" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.165711 4687 scope.go:117] "RemoveContainer" containerID="8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed" Nov 25 09:04:21 crc kubenswrapper[4687]: E1125 09:04:21.166451 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-p68hx_openshift-ovn-kubernetes(d371271f-84c3-405c-b41f-604a06c1bb71)\"" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.186869 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":
\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.207298 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.227218 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.235730 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.235807 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.235825 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.235852 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.235884 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:21Z","lastTransitionTime":"2025-11-25T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.249052 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.266448 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.280444 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.293653 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.310387 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.325870 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.340163 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.340229 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.340255 4687 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.340294 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.340318 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:21Z","lastTransitionTime":"2025-11-25T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.343074 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d8fb345623a1675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-conf
ig\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.361456 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.389457 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"message\\\":\\\"b\\\\nI1125 09:04:19.660645 6361 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI1125 09:04:19.660678 6361 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nI1125 09:04:19.660680 6361 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI1125 09:04:19.660661 6361 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-config-operator/machine-config-daemon]} name:Service_openshift-machine-config-operator/machine-config-daemon_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.43:8798: 10.217.4.43:9001:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {a36f6289-d09f-43f8-8a8a-c9d2cc11eb0d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 09:04:19.660694 6361 base_network_controller_pods.go:916] Annot\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:04:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-p68hx_openshift-ovn-kubernetes(d371271f-84c3-405c-b41f-604a06c1bb71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.410303 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.427195 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.443007 4687 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.443065 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.443077 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.443094 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.443106 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:21Z","lastTransitionTime":"2025-11-25T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.449566 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.469275 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/k
ubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.518994 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.533015 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.539702 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.545168 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.545208 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.545216 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.545228 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.545237 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:21Z","lastTransitionTime":"2025-11-25T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.558876 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"message\\\":\\\"b\\\\nI1125 09:04:19.660645 6361 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI1125 09:04:19.660678 6361 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nI1125 09:04:19.660680 6361 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI1125 09:04:19.660661 6361 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-config-operator/machine-config-daemon]} name:Service_openshift-machine-config-operator/machine-config-daemon_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.43:8798: 10.217.4.43:9001:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {a36f6289-d09f-43f8-8a8a-c9d2cc11eb0d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 09:04:19.660694 6361 base_network_controller_pods.go:916] Annot\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:04:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed 
container=ovnkube-controller pod=ovnkube-node-p68hx_openshift-ovn-kubernetes(d371271f-84c3-405c-b41f-604a06c1bb71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.573021 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.590144 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.614131 4687 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.632915 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.646795 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.648150 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.648205 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.648222 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.648247 4687 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeNotReady" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.648269 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:21Z","lastTransitionTime":"2025-11-25T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.660453 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.675355 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.687663 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.699692 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.712871 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.728258 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.749414 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"container
ID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 
09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 
09:04:21.751137 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.751166 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.751177 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.751190 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.751200 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:21Z","lastTransitionTime":"2025-11-25T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.766688 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11
-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.780356 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d8fb345623a1675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.853912 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.853984 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.854003 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.854025 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.854039 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:21Z","lastTransitionTime":"2025-11-25T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.894376 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.894486 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.894595 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.894620 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.894636 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:21Z","lastTransitionTime":"2025-11-25T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:21 crc kubenswrapper[4687]: E1125 09:04:21.914704 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.918873 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.918917 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.918931 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.918953 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.918969 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:21Z","lastTransitionTime":"2025-11-25T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:21 crc kubenswrapper[4687]: E1125 09:04:21.933206 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.937769 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.937804 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.937816 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.937832 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.937844 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:21Z","lastTransitionTime":"2025-11-25T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:21 crc kubenswrapper[4687]: E1125 09:04:21.952301 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.956627 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.956680 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.956698 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.956720 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.956738 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:21Z","lastTransitionTime":"2025-11-25T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:21 crc kubenswrapper[4687]: E1125 09:04:21.975129 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.982820 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.982911 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.982936 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.982968 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:21 crc kubenswrapper[4687]: I1125 09:04:21.982992 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:21Z","lastTransitionTime":"2025-11-25T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:22 crc kubenswrapper[4687]: E1125 09:04:22.001721 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:21Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:22 crc kubenswrapper[4687]: E1125 09:04:22.001854 4687 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.004089 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
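Every attempt in the retry loop above fails for the same stated reason: the API server cannot admit the node-status patch because the node.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 presents a serving certificate that expired on 2025-08-24T17:21:41Z, long before the node clock's 2025-11-25T09:04:21Z, and once its retries are spent the kubelet reports "update node status exceeds retry count". A minimal sketch for confirming the certificate's validity window, assuming only that the endpoint taken from the log is reachable from the node and that Go tooling is available (certcheck.go is a hypothetical name):

package main

// certcheck.go: dial the webhook listener named in the kubelet errors and
// print the serving certificate's validity window. Verification is skipped
// deliberately; the goal is to inspect the known-expired certificate, not
// to trust it.

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
		InsecureSkipVerify: true, // inspect only; the chain is known to be bad
	})
	if err != nil {
		log.Fatalf("dial webhook: %v", err)
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Printf("subject:   %s\n", cert.Subject)
	fmt.Printf("notBefore: %s\n", cert.NotBefore.Format(time.RFC3339))
	fmt.Printf("notAfter:  %s\n", cert.NotAfter.Format(time.RFC3339))
	if time.Now().After(cert.NotAfter) {
		fmt.Println("expired: matches the x509 error in the kubelet log")
	}
}

The same dates can be read with openssl s_client -connect 127.0.0.1:9743 piped into openssl x509 -noout -dates. Until the certificate is rotated, every status patch is rejected at admission; the kubelet keeps recording the condition events locally, which is why the same five-event block repeats below.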
event="NodeHasSufficientMemory" Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.004157 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.004175 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.004201 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.004220 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:22Z","lastTransitionTime":"2025-11-25T09:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.107197 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.107274 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.107292 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.107320 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.107344 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:22Z","lastTransitionTime":"2025-11-25T09:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.209755 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.209801 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.209820 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.209845 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.209864 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:22Z","lastTransitionTime":"2025-11-25T09:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.312658 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.312739 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.312762 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.312791 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.312815 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:22Z","lastTransitionTime":"2025-11-25T09:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.734726 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.734875 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:04:22 crc kubenswrapper[4687]: E1125 09:04:22.735061 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.735164 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb"
Nov 25 09:04:22 crc kubenswrapper[4687]: I1125 09:04:22.735174 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:04:22 crc kubenswrapper[4687]: E1125 09:04:22.735353 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:04:22 crc kubenswrapper[4687]: E1125 09:04:22.735449 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461"
Nov 25 09:04:22 crc kubenswrapper[4687]: E1125 09:04:22.735638 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:04:24 crc kubenswrapper[4687]: I1125 09:04:24.734383 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb"
Nov 25 09:04:24 crc kubenswrapper[4687]: I1125 09:04:24.734534 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:04:24 crc kubenswrapper[4687]: I1125 09:04:24.734557 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:04:24 crc kubenswrapper[4687]: E1125 09:04:24.734591 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461"
Nov 25 09:04:24 crc kubenswrapper[4687]: I1125 09:04:24.734662 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:04:24 crc kubenswrapper[4687]: E1125 09:04:24.734841 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:04:24 crc kubenswrapper[4687]: E1125 09:04:24.735041 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:04:24 crc kubenswrapper[4687]: E1125 09:04:24.735145 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.725938 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.725995 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.726010 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.726028 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.726042 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:25Z","lastTransitionTime":"2025-11-25T09:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.769666 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"message\\\":\\\"b\\\\nI1125 09:04:19.660645 6361 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI1125 09:04:19.660678 6361 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nI1125 09:04:19.660680 6361 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI1125 09:04:19.660661 6361 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-config-operator/machine-config-daemon]} name:Service_openshift-machine-config-operator/machine-config-daemon_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.43:8798: 10.217.4.43:9001:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {a36f6289-d09f-43f8-8a8a-c9d2cc11eb0d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 09:04:19.660694 6361 base_network_controller_pods.go:916] Annot\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:04:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed 
container=ovnkube-controller pod=ovnkube-node-p68hx_openshift-ovn-kubernetes(d371271f-84c3-405c-b41f-604a06c1bb71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.784467 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.804089 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.817362 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.828860 4687 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.828906 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.828918 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.828937 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.828948 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:25Z","lastTransitionTime":"2025-11-25T09:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.834318 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.856250 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/k
ubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.871801 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T09:04:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.884331 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.896462 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.908462 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.920619 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.931424 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.931458 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.931470 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.931487 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.931519 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:25Z","lastTransitionTime":"2025-11-25T09:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.943682 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15c26146-fc78-4141-8156-dbfb4c379668\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fed588c0fe3b1c0c10c1ff1a154c1ae83a483da23e9b38c0b919a2b293a76b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f9867bd6213ab7153f99348331e4fedb1cac3236436c454ab2974ee3fdb1d9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMoun
ts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ecdc1a162e491f6c98ecee45e49897884410f336f2735113e00d110f86e0e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://84c73656043e620a3ebc7a9dad78446f135c8c5309492451264d79fc29a0870b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c73656043e620a3ebc7a9dad78446f135c8c5309492451264d79fc29a0870b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.954224 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.965962 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.979425 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}
},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] 
MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.990862 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:25 crc kubenswrapper[4687]: I1125 09:04:25.999902 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d8fb345623a1675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:25Z is after 2025-08-24T17:21:41Z" Nov 25 
09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.033697 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.033731 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.033741 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.033756 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.033768 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:26Z","lastTransitionTime":"2025-11-25T09:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.548061 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.548115 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.548132 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.548155 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.548172 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:26Z","lastTransitionTime":"2025-11-25T09:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.651466 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.651632 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.651650 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.651673 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.651691 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:26Z","lastTransitionTime":"2025-11-25T09:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.733955 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.733997 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:26 crc kubenswrapper[4687]: E1125 09:04:26.734118 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.734144 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.734158 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:26 crc kubenswrapper[4687]: E1125 09:04:26.734238 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:26 crc kubenswrapper[4687]: E1125 09:04:26.734294 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:26 crc kubenswrapper[4687]: E1125 09:04:26.734312 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.754852 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.754904 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.754936 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.754956 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.754968 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:26Z","lastTransitionTime":"2025-11-25T09:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.857716 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.857765 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.857778 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.857796 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.857808 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:26Z","lastTransitionTime":"2025-11-25T09:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.960119 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.960189 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.960204 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.960226 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:26 crc kubenswrapper[4687]: I1125 09:04:26.960238 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:26Z","lastTransitionTime":"2025-11-25T09:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:04:28 crc kubenswrapper[4687]: I1125 09:04:28.712772 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:28 crc kubenswrapper[4687]: I1125 09:04:28.712841 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:28 crc kubenswrapper[4687]: I1125 09:04:28.712861 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:28 crc kubenswrapper[4687]: I1125 09:04:28.712891 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:28 crc kubenswrapper[4687]: I1125 09:04:28.712909 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:28Z","lastTransitionTime":"2025-11-25T09:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:28 crc kubenswrapper[4687]: I1125 09:04:28.734273 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:28 crc kubenswrapper[4687]: I1125 09:04:28.734278 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:28 crc kubenswrapper[4687]: I1125 09:04:28.734290 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:28 crc kubenswrapper[4687]: I1125 09:04:28.734447 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:28 crc kubenswrapper[4687]: E1125 09:04:28.734735 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:04:28 crc kubenswrapper[4687]: E1125 09:04:28.734816 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:28 crc kubenswrapper[4687]: E1125 09:04:28.734894 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:28 crc kubenswrapper[4687]: E1125 09:04:28.735019 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:28 crc kubenswrapper[4687]: I1125 09:04:28.816068 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:28 crc kubenswrapper[4687]: I1125 09:04:28.816134 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:28 crc kubenswrapper[4687]: I1125 09:04:28.816154 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:28 crc kubenswrapper[4687]: I1125 09:04:28.816189 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:28 crc kubenswrapper[4687]: I1125 09:04:28.816225 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:28Z","lastTransitionTime":"2025-11-25T09:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:28 crc kubenswrapper[4687]: I1125 09:04:28.919087 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:28 crc kubenswrapper[4687]: I1125 09:04:28.919147 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:28 crc kubenswrapper[4687]: I1125 09:04:28.919160 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:28 crc kubenswrapper[4687]: I1125 09:04:28.919184 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:28 crc kubenswrapper[4687]: I1125 09:04:28.919197 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:28Z","lastTransitionTime":"2025-11-25T09:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 09:04:29 crc kubenswrapper[4687]: I1125 09:04:29.022624 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:29 crc kubenswrapper[4687]: I1125 09:04:29.022684 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:29 crc kubenswrapper[4687]: I1125 09:04:29.022706 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:29 crc kubenswrapper[4687]: I1125 09:04:29.022735 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:29 crc kubenswrapper[4687]: I1125 09:04:29.022757 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:29Z","lastTransitionTime":"2025-11-25T09:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.155921 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.155985 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.156002 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.156025 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.156041 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:30Z","lastTransitionTime":"2025-11-25T09:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.257956 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.258018 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.258035 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.258058 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.258076 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:30Z","lastTransitionTime":"2025-11-25T09:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.360633 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.360695 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.360719 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.360749 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.360771 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:30Z","lastTransitionTime":"2025-11-25T09:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.464976 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.465017 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.465028 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.465057 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.465069 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:30Z","lastTransitionTime":"2025-11-25T09:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.568131 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.568201 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.568265 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.568298 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.568319 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:30Z","lastTransitionTime":"2025-11-25T09:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.671594 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.671659 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.671667 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.671681 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.671709 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:30Z","lastTransitionTime":"2025-11-25T09:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.734343 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.734417 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.734446 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:30 crc kubenswrapper[4687]: E1125 09:04:30.734534 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.734418 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:30 crc kubenswrapper[4687]: E1125 09:04:30.734605 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:30 crc kubenswrapper[4687]: E1125 09:04:30.734675 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:30 crc kubenswrapper[4687]: E1125 09:04:30.734738 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.774249 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.774308 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.774322 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.774337 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.774349 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:30Z","lastTransitionTime":"2025-11-25T09:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.876980 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.877026 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.877035 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.877047 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.877056 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:30Z","lastTransitionTime":"2025-11-25T09:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.979686 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.979723 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.979733 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.979746 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:30 crc kubenswrapper[4687]: I1125 09:04:30.979756 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:30Z","lastTransitionTime":"2025-11-25T09:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.081862 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.081891 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.081900 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.081913 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.081922 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:31Z","lastTransitionTime":"2025-11-25T09:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.184587 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.184631 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.184643 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.184660 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.184670 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:31Z","lastTransitionTime":"2025-11-25T09:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.287024 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.287051 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.287059 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.287071 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.287080 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:31Z","lastTransitionTime":"2025-11-25T09:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.391098 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.391222 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.391257 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.391287 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.391309 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:31Z","lastTransitionTime":"2025-11-25T09:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.495120 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.495175 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.495193 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.495212 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.495227 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:31Z","lastTransitionTime":"2025-11-25T09:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.598060 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.598105 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.598118 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.598136 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.598149 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:31Z","lastTransitionTime":"2025-11-25T09:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.700621 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.700673 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.700687 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.700707 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.700722 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:31Z","lastTransitionTime":"2025-11-25T09:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.803136 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.803197 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.803210 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.803230 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.803242 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:31Z","lastTransitionTime":"2025-11-25T09:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.906806 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.906870 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.906887 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.906914 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:31 crc kubenswrapper[4687]: I1125 09:04:31.906931 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:31Z","lastTransitionTime":"2025-11-25T09:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.010036 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.010129 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.010154 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.010180 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.010197 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:32Z","lastTransitionTime":"2025-11-25T09:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.113044 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.113126 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.113149 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.113173 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.113192 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:32Z","lastTransitionTime":"2025-11-25T09:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.193578 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.193648 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.193669 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.193694 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.193712 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:32Z","lastTransitionTime":"2025-11-25T09:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:32 crc kubenswrapper[4687]: E1125 09:04:32.211336 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:32Z is after 
2025-08-24T17:21:41Z" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.215339 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.215392 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.215408 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.215434 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.215456 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:32Z","lastTransitionTime":"2025-11-25T09:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:32 crc kubenswrapper[4687]: E1125 09:04:32.230776 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:32Z is after 
2025-08-24T17:21:41Z" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.235236 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.235281 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.235292 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.235314 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.235328 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:32Z","lastTransitionTime":"2025-11-25T09:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:32 crc kubenswrapper[4687]: E1125 09:04:32.250363 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:32Z is after 
2025-08-24T17:21:41Z" Nov 25 09:04:32 crc kubenswrapper[4687]: E1125 09:04:32.291462 4687 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.293110 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.293147 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.293155 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.293170 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.293180 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:32Z","lastTransitionTime":"2025-11-25T09:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.395702 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.395741 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.395750 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.395767 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.395776 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:32Z","lastTransitionTime":"2025-11-25T09:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.734029 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.734036 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.734080 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.734144 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:32 crc kubenswrapper[4687]: E1125 09:04:32.734940 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:32 crc kubenswrapper[4687]: E1125 09:04:32.735071 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:32 crc kubenswrapper[4687]: E1125 09:04:32.735181 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:32 crc kubenswrapper[4687]: E1125 09:04:32.735246 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.805567 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.805617 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.805629 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.805648 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.805664 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:32Z","lastTransitionTime":"2025-11-25T09:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.908919 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.909239 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.909305 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.909388 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:32 crc kubenswrapper[4687]: I1125 09:04:32.909453 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:32Z","lastTransitionTime":"2025-11-25T09:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.248988 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.249019 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.249027 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.249039 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.249047 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:34Z","lastTransitionTime":"2025-11-25T09:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.351343 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.351417 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.351431 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.351447 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.351457 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:34Z","lastTransitionTime":"2025-11-25T09:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.454059 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.454102 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.454112 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.454128 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.454141 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:34Z","lastTransitionTime":"2025-11-25T09:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.556731 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.556798 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.556816 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.556839 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.556856 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:34Z","lastTransitionTime":"2025-11-25T09:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.659312 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.659361 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.659376 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.659394 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.659406 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:34Z","lastTransitionTime":"2025-11-25T09:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.734753 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.734860 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.734888 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:34 crc kubenswrapper[4687]: E1125 09:04:34.734910 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.735007 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:34 crc kubenswrapper[4687]: E1125 09:04:34.735017 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:04:34 crc kubenswrapper[4687]: E1125 09:04:34.735180 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:34 crc kubenswrapper[4687]: E1125 09:04:34.735296 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.761775 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.761807 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.761818 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.761834 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.761846 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:34Z","lastTransitionTime":"2025-11-25T09:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.863933 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.863967 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.863977 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.863992 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.864003 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:34Z","lastTransitionTime":"2025-11-25T09:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.966391 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.966443 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.966456 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.966475 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:34 crc kubenswrapper[4687]: I1125 09:04:34.966487 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:34Z","lastTransitionTime":"2025-11-25T09:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.069187 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.069243 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.069255 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.069272 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.069283 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:35Z","lastTransitionTime":"2025-11-25T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.171871 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.171942 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.171955 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.171977 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.171992 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:35Z","lastTransitionTime":"2025-11-25T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.275095 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.275178 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.275203 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.275227 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.275248 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:35Z","lastTransitionTime":"2025-11-25T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.378083 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.378143 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.378155 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.378175 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.378190 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:35Z","lastTransitionTime":"2025-11-25T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.480534 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.480603 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.480626 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.480656 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.480673 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:35Z","lastTransitionTime":"2025-11-25T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.583463 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.583531 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.583551 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.583567 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.583586 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:35Z","lastTransitionTime":"2025-11-25T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.686018 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.686052 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.686060 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.686074 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.686083 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:35Z","lastTransitionTime":"2025-11-25T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.736104 4687 scope.go:117] "RemoveContainer" containerID="8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed" Nov 25 09:04:35 crc kubenswrapper[4687]: E1125 09:04:35.736448 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-p68hx_openshift-ovn-kubernetes(d371271f-84c3-405c-b41f-604a06c1bb71)\"" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.752127 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.769764 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"message\\\":\\\"b\\\\nI1125 09:04:19.660645 6361 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI1125 09:04:19.660678 6361 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nI1125 09:04:19.660680 6361 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI1125 09:04:19.660661 6361 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-config-operator/machine-config-daemon]} name:Service_openshift-machine-config-operator/machine-config-daemon_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.43:8798: 10.217.4.43:9001:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {a36f6289-d09f-43f8-8a8a-c9d2cc11eb0d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 09:04:19.660694 6361 base_network_controller_pods.go:916] Annot\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:04:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-p68hx_openshift-ovn-kubernetes(d371271f-84c3-405c-b41f-604a06c1bb71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.779117 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.788622 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.788653 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.788661 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.788690 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.788698 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:35Z","lastTransitionTime":"2025-11-25T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.793655 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.805239 4687 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.816634 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\
\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.827283 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b827994
88ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.838395 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.851200 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.862729 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.874529 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.882815 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.891070 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.891119 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.891129 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.891142 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.891150 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:35Z","lastTransitionTime":"2025-11-25T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.891799 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.903394 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15c26146-fc78-4141-8156-dbfb4c379668\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fed588c0fe3b1c0c10c1ff1a154c1ae83a483da23e9b38c0b919a2b293a76b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f9867bd6213ab7153f99348331e4fedb1cac3236436c454ab2974ee3fdb1d9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ecdc1a162e491f6c98ecee45e49897884410f336f2735113e00d110f86e0e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://84c73656043e620a3ebc7a9dad78446f135c8c5309492451264d79fc29a0870b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c73656043e620a3ebc7a9dad78446f135c8c5309492451264d79fc29a0870b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.914534 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.925113 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d8fb345623a1675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\
":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.937002 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c9
87117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 
09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.993705 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.993762 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.993774 
4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.993789 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:35 crc kubenswrapper[4687]: I1125 09:04:35.993799 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:35Z","lastTransitionTime":"2025-11-25T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.096610 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.096652 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.096664 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.096681 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.096693 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:36Z","lastTransitionTime":"2025-11-25T09:04:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.198848 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.198882 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.198894 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.198913 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.198931 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:36Z","lastTransitionTime":"2025-11-25T09:04:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.301711 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.301761 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.301786 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.301802 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.301812 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:36Z","lastTransitionTime":"2025-11-25T09:04:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.331771 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs\") pod \"network-metrics-daemon-cscrb\" (UID: \"0433643a-5ed9-485b-a788-51de4a92f461\") " pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:36 crc kubenswrapper[4687]: E1125 09:04:36.332008 4687 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:04:36 crc kubenswrapper[4687]: E1125 09:04:36.332116 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs podName:0433643a-5ed9-485b-a788-51de4a92f461 nodeName:}" failed. No retries permitted until 2025-11-25 09:05:08.33208881 +0000 UTC m=+103.385728558 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs") pod "network-metrics-daemon-cscrb" (UID: "0433643a-5ed9-485b-a788-51de4a92f461") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.403836 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.403873 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.403883 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.403898 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.403909 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:36Z","lastTransitionTime":"2025-11-25T09:04:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.506266 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.506299 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.506307 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.506323 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.506333 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:36Z","lastTransitionTime":"2025-11-25T09:04:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
[... heartbeat block repeated at 09:04:36.608 and 09:04:36.710 ...]
Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.734134 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb"
Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.734150 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.734195 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:04:36 crc kubenswrapper[4687]: I1125 09:04:36.734255 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:04:36 crc kubenswrapper[4687]: E1125 09:04:36.734436 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461"
Nov 25 09:04:36 crc kubenswrapper[4687]: E1125 09:04:36.734529 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:04:36 crc kubenswrapper[4687]: E1125 09:04:36.734646 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:04:36 crc kubenswrapper[4687]: E1125 09:04:36.734701 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
[... heartbeat block repeated at 09:04:36.813 ...]
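All four pods fail the same way because sandbox creation needs a CNI network config and /etc/kubernetes/cni/net.d/ is still empty (OVN-Kubernetes has not written its config yet). For reference, a CNI network configuration file is a small JSON document of roughly this shape; the values below are illustrative, not the OVN-Kubernetes config that eventually lands in that directory:

{
  "cniVersion": "0.4.0",
  "name": "examplenet",
  "type": "bridge",
  "bridge": "cni0",
  "ipam": {
    "type": "host-local",
    "subnet": "10.22.0.0/16"
  }
}

Once any valid file like this exists in the directory, the runtime reports NetworkReady=true and the "no CNI configuration file" errors stop.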
[... heartbeat block repeated at ~100 ms intervals from 09:04:36.916 through 09:04:38.665, identical apart from timestamps ...]
[... "No sandbox for pod can be found. Need to start a new one" and "Error syncing pod, skipping" entries repeated at 09:04:38.734-.735 for the same four pods (network-metrics-daemon-cscrb, network-check-source-55646444c4-trplf, network-check-target-xd92c, networking-console-plugin-85b44fc459-gdk6g) ...]
[... heartbeat block repeated at 09:04:38.768 and 09:04:38.872 ...]
[... heartbeat block repeated at ~100 ms intervals from 09:04:38.975 through 09:04:40.114, identical apart from timestamps ...]
[... heartbeat block repeated at 09:04:40.216 ...]
Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.225266 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wlzrb_0e7c96e4-c7fa-466f-b0b6-495612ed71f8/kube-multus/0.log"
Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.225325 4687 generic.go:334] "Generic (PLEG): container finished" podID="0e7c96e4-c7fa-466f-b0b6-495612ed71f8" containerID="42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45" exitCode=1
Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.225553 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wlzrb" event={"ID":"0e7c96e4-c7fa-466f-b0b6-495612ed71f8","Type":"ContainerDied","Data":"42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45"}
Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.225937 4687 scope.go:117] "RemoveContainer" containerID="42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45"
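The PLEG entries above show the usual lifecycle pattern: a relist notices the container has exited, a ContainerDied event is fed into the sync loop, and the dead container is scheduled for removal so it can be restarted. A toy sketch of the relist diff that produces such events, with illustrative types that are not kubelet's:

package main

import "fmt"

type state string

const (
	running state = "running"
	exited  state = "exited"
)

// diff compares two container snapshots and emits ContainerDied for any
// container that was running before and has exited now.
func diff(prev, curr map[string]state) []string {
	var events []string
	for id, s := range curr {
		if prev[id] == running && s == exited {
			events = append(events, "ContainerDied "+id)
		}
	}
	return events
}

func main() {
	prev := map[string]state{"42d12209": running}
	curr := map[string]state{"42d12209": exited}
	for _, e := range diff(prev, curr) {
		fmt.Println(e) // ContainerDied 42d12209
	}
}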
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.256366 4687 status_manager.go:875] "Failed to 
Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.256366 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status [... escaped pod-status patch JSON elided; it reports kube-multus-additional-cni-plugins Running and the init containers egress-router-binary-copy, cni-plugins, bond-cni-plugin, routeoverride-cni, whereabouts-cni-bincopy, and whereabouts-cni all Completed between 09:03:50 and 09:04:00 ...] for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:40Z is after 2025-08-24T17:21:41Z"
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:39Z\\\",\\\"message\\\":\\\"2025-11-25T09:03:53+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_64a906f1-5136-405a-9ddd-1ae4c3a4f964\\\\n2025-11-25T09:03:53+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_64a906f1-5136-405a-9ddd-1ae4c3a4f964 to /host/opt/cni/bin/\\\\n2025-11-25T09:03:54Z [verbose] multus-daemon started\\\\n2025-11-25T09:03:54Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:04:39Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.283838 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.294462 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.312218 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.321032 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.321064 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.321075 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.321089 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.321099 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:40Z","lastTransitionTime":"2025-11-25T09:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.324092 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15c26146-fc78-4141-8156-dbfb4c379668\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fed588c0fe3b1c0c10c1ff1a154c1ae83a483da23e9b38c0b919a2b293a76b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f9867bd6213ab7153f99348331e4fedb1cac3236436c454ab2974ee3fdb1d9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMoun
ts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ecdc1a162e491f6c98ecee45e49897884410f336f2735113e00d110f86e0e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://84c73656043e620a3ebc7a9dad78446f135c8c5309492451264d79fc29a0870b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c73656043e620a3ebc7a9dad78446f135c8c5309492451264d79fc29a0870b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.336853 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.349220 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.363473 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.379423 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.393358 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.405258 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.416875 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d8fb345623a1675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 25 
09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.423349 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.423384 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.423392 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.423406 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.423415 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:40Z","lastTransitionTime":"2025-11-25T09:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.430875 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 
09:04:40.451563 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\
\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77325745
3265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"message\\\":\\\"b\\\\nI1125 09:04:19.660645 6361 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI1125 09:04:19.660678 6361 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nI1125 09:04:19.660680 6361 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI1125 09:04:19.660661 6361 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-config-operator/machine-config-daemon]} name:Service_openshift-machine-config-operator/machine-config-daemon_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.43:8798: 10.217.4.43:9001:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {a36f6289-d09f-43f8-8a8a-c9d2cc11eb0d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 09:04:19.660694 6361 base_network_controller_pods.go:916] Annot\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:04:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-p68hx_openshift-ovn-kubernetes(d371271f-84c3-405c-b41f-604a06c1bb71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.463979 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.525337 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.525375 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.525386 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.525404 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.525414 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:40Z","lastTransitionTime":"2025-11-25T09:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.628933 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.628989 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.629005 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.629023 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.629035 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:40Z","lastTransitionTime":"2025-11-25T09:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.731863 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.731918 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.731934 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.731956 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.731972 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:40Z","lastTransitionTime":"2025-11-25T09:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.734678 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:40 crc kubenswrapper[4687]: E1125 09:04:40.734789 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.734850 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:40 crc kubenswrapper[4687]: E1125 09:04:40.734923 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.734974 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:40 crc kubenswrapper[4687]: E1125 09:04:40.735026 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.735070 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:40 crc kubenswrapper[4687]: E1125 09:04:40.735118 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.835566 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.835617 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.835628 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.835644 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.835653 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:40Z","lastTransitionTime":"2025-11-25T09:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.938621 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.938686 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.938723 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.938753 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:40 crc kubenswrapper[4687]: I1125 09:04:40.938776 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:40Z","lastTransitionTime":"2025-11-25T09:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.042530 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.042588 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.042601 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.042621 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.042635 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:41Z","lastTransitionTime":"2025-11-25T09:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.145863 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.145931 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.145948 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.145972 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.145990 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:41Z","lastTransitionTime":"2025-11-25T09:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.232140 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wlzrb_0e7c96e4-c7fa-466f-b0b6-495612ed71f8/kube-multus/0.log" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.232213 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wlzrb" event={"ID":"0e7c96e4-c7fa-466f-b0b6-495612ed71f8","Type":"ContainerStarted","Data":"fd7297dc4adfd4a2e6ea288887f965d19bcf553824c5f7813dfa12052c60d189"} Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.248972 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.249034 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.249051 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.249076 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.249097 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:41Z","lastTransitionTime":"2025-11-25T09:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.261915 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8abd6908316695147c15b437489d344863752499
515f1c18b0cd4072116701ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"message\\\":\\\"b\\\\nI1125 09:04:19.660645 6361 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI1125 09:04:19.660678 6361 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nI1125 09:04:19.660680 6361 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI1125 09:04:19.660661 6361 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-config-operator/machine-config-daemon]} name:Service_openshift-machine-config-operator/machine-config-daemon_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.43:8798: 10.217.4.43:9001:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {a36f6289-d09f-43f8-8a8a-c9d2cc11eb0d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 09:04:19.660694 6361 base_network_controller_pods.go:916] Annot\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:04:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-p68hx_openshift-ovn-kubernetes(d371271f-84c3-405c-b41f-604a06c1bb71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.277194 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.298632 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.314173 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.333418 4687 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.349830 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd7297dc4adfd4a2e6ea288887f965d19bcf553824c5f7813dfa12052c60d189\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:39Z\\\",\\\"message\\\":\\\"2025-11-25T09:03:53+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_64a906f1-5136-405a-9ddd-1ae4c3a4f964\\\\n2025-11-25T09:03:53+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_64a906f1-5136-405a-9ddd-1ae4c3a4f964 to /host/opt/cni/bin/\\\\n2025-11-25T09:03:54Z [verbose] multus-daemon started\\\\n2025-11-25T09:03:54Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:04:39Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.351948 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.352000 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.352014 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.352034 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.352050 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:41Z","lastTransitionTime":"2025-11-25T09:04:41Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.365756 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.377291 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.389489 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.400096 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.410874 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.421704 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15c26146-fc78-4141-8156-dbfb4c379668\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fed588c0fe3b1c0c10c1ff1a154c1ae83a483da23e9b38c0b919a2b293a76b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f9867bd6213ab7153f99348331e4fedb1cac3236436c454ab2974ee3fdb1d9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ecdc1a162e491f6c98ecee45e49897884410f336f2735113e00d110f86e0e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://84c73656043e620a3ebc7a9dad78446f135c8c5309492451264d79fc29a0870b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c73656043e620a3ebc7a9dad78446f135c8c5309492451264d79fc29a0870b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.433695 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.444364 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.453630 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.453671 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.453680 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.453693 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.453702 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:41Z","lastTransitionTime":"2025-11-25T09:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.455725 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.465795 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.474608 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d8fb345623a1675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:41Z is after 2025-08-24T17:21:41Z" Nov 25 
09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.556721 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.556770 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.556787 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.556808 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.556825 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:41Z","lastTransitionTime":"2025-11-25T09:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.660065 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.660146 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.660182 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.660213 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.660234 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:41Z","lastTransitionTime":"2025-11-25T09:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.763525 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.763587 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.763605 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.763632 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.763648 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:41Z","lastTransitionTime":"2025-11-25T09:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.866147 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.866213 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.866230 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.866255 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.866280 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:41Z","lastTransitionTime":"2025-11-25T09:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.969259 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.969332 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.969355 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.969384 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:41 crc kubenswrapper[4687]: I1125 09:04:41.969405 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:41Z","lastTransitionTime":"2025-11-25T09:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.072949 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.073000 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.073012 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.073029 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.073045 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:42Z","lastTransitionTime":"2025-11-25T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.176244 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.176311 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.176334 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.176365 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.176389 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:42Z","lastTransitionTime":"2025-11-25T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.279767 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.279842 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.279860 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.279888 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.279908 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:42Z","lastTransitionTime":"2025-11-25T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.382631 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.382679 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.382736 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.382760 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.382775 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:42Z","lastTransitionTime":"2025-11-25T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.485218 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.485260 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.485271 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.485287 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.485299 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:42Z","lastTransitionTime":"2025-11-25T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.587475 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.587587 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.587613 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.587637 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.587655 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:42Z","lastTransitionTime":"2025-11-25T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.657654 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.657706 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.657721 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.657742 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.657758 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:42Z","lastTransitionTime":"2025-11-25T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:42 crc kubenswrapper[4687]: E1125 09:04:42.673695 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.678600 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.678663 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.678683 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.678708 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.678727 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:42Z","lastTransitionTime":"2025-11-25T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:42 crc kubenswrapper[4687]: E1125 09:04:42.698667 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.704062 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.704114 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.704131 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.704156 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.704176 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:42Z","lastTransitionTime":"2025-11-25T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:42 crc kubenswrapper[4687]: E1125 09:04:42.722672 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.727364 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.727395 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.727406 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.727422 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.727433 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:42Z","lastTransitionTime":"2025-11-25T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.733774 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.733839 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:42 crc kubenswrapper[4687]: E1125 09:04:42.733952 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.734031 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.734054 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:42 crc kubenswrapper[4687]: E1125 09:04:42.734096 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:42 crc kubenswrapper[4687]: E1125 09:04:42.734136 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:42 crc kubenswrapper[4687]: E1125 09:04:42.734178 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:42 crc kubenswrapper[4687]: E1125 09:04:42.739778 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.744838 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.744863 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.744872 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.744883 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.744893 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:42Z","lastTransitionTime":"2025-11-25T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:42 crc kubenswrapper[4687]: E1125 09:04:42.759937 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:42 crc kubenswrapper[4687]: E1125 09:04:42.760076 4687 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.761275 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.761298 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.761308 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.761321 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.761331 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:42Z","lastTransitionTime":"2025-11-25T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.864729 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.864851 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.864874 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.864907 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.864930 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:42Z","lastTransitionTime":"2025-11-25T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.967800 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.967879 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.967949 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.967974 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:42 crc kubenswrapper[4687]: I1125 09:04:42.967993 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:42Z","lastTransitionTime":"2025-11-25T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.070649 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.070699 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.070712 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.070730 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.070743 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:43Z","lastTransitionTime":"2025-11-25T09:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.172829 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.172875 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.172891 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.172913 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.172931 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:43Z","lastTransitionTime":"2025-11-25T09:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.276085 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.276117 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.276128 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.276143 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.276154 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:43Z","lastTransitionTime":"2025-11-25T09:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.378360 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.378409 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.378420 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.378436 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.378449 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:43Z","lastTransitionTime":"2025-11-25T09:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.481700 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.481766 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.481784 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.481809 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.481831 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:43Z","lastTransitionTime":"2025-11-25T09:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.584754 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.584836 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.584858 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.584885 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.584904 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:43Z","lastTransitionTime":"2025-11-25T09:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.687802 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.687851 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.687861 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.687881 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.687894 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:43Z","lastTransitionTime":"2025-11-25T09:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.790772 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.790826 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.790838 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.790860 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.790874 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:43Z","lastTransitionTime":"2025-11-25T09:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.894241 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.894303 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.894317 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.894340 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.894353 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:43Z","lastTransitionTime":"2025-11-25T09:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.997053 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.997100 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.997115 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.997133 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:43 crc kubenswrapper[4687]: I1125 09:04:43.997144 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:43Z","lastTransitionTime":"2025-11-25T09:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.099898 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.099944 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.099954 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.099971 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.099982 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:44Z","lastTransitionTime":"2025-11-25T09:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.202903 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.202957 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.202973 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.202994 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.203012 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:44Z","lastTransitionTime":"2025-11-25T09:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.306091 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.306166 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.306184 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.306208 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.306225 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:44Z","lastTransitionTime":"2025-11-25T09:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.410183 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.410241 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.410257 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.410279 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.410294 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:44Z","lastTransitionTime":"2025-11-25T09:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.513859 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.513898 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.513907 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.513921 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.513932 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:44Z","lastTransitionTime":"2025-11-25T09:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.616905 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.616970 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.616987 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.617010 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.617028 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:44Z","lastTransitionTime":"2025-11-25T09:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.719129 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.719203 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.719226 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.719257 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.719280 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:44Z","lastTransitionTime":"2025-11-25T09:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.734630 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.734759 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:44 crc kubenswrapper[4687]: E1125 09:04:44.735013 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.735044 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.735055 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:44 crc kubenswrapper[4687]: E1125 09:04:44.735153 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:04:44 crc kubenswrapper[4687]: E1125 09:04:44.735279 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:44 crc kubenswrapper[4687]: E1125 09:04:44.735483 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.821679 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.821722 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.821732 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.821746 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.821755 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:44Z","lastTransitionTime":"2025-11-25T09:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.924040 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.924073 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.924082 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.924095 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:44 crc kubenswrapper[4687]: I1125 09:04:44.924105 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:44Z","lastTransitionTime":"2025-11-25T09:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.027680 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.027749 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.027772 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.027963 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.028012 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:45Z","lastTransitionTime":"2025-11-25T09:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.131227 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.131303 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.131321 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.131345 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.131363 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:45Z","lastTransitionTime":"2025-11-25T09:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.234065 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.234148 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.234180 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.234212 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.234238 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:45Z","lastTransitionTime":"2025-11-25T09:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.337485 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.337550 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.337564 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.337581 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.337594 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:45Z","lastTransitionTime":"2025-11-25T09:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.440045 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.440103 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.440116 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.440137 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.440153 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:45Z","lastTransitionTime":"2025-11-25T09:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.543213 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.543283 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.543300 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.543323 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.543340 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:45Z","lastTransitionTime":"2025-11-25T09:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.646444 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.646553 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.646578 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.646612 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.646636 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:45Z","lastTransitionTime":"2025-11-25T09:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.750081 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.750205 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.750225 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.750249 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.750269 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:45Z","lastTransitionTime":"2025-11-25T09:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.750219 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.767910 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:45Z is after 
2025-08-24T17:21:41Z" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.787665 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd7297dc4adfd4a2e6ea288887f965d19bcf553824c5f7813dfa12052c60d189\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:39Z\\\",\\\"message\\\":\\\"2025-11-25T09:03:53+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_64a906f1-5136-405a-9ddd-1ae4c3a4f964\\\\n2025-11-25T09:03:53+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_64a906f1-5136-405a-9ddd-1ae4c3a4f964 to /host/opt/cni/bin/\\\\n2025-11-25T09:03:54Z [verbose] multus-daemon started\\\\n2025-11-25T09:03:54Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:04:39Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.799454 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.814292 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15c26146-fc78-4141-8156-dbfb4c379668\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fed588c0fe3b1c0c10c1ff1a154c1ae83a483da23e9b38c0b919a2b293a76b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f9867bd6213ab7153f99348331e4fedb1cac3236436c454ab2974ee3fdb1d9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ecdc1a162e491f6c98ecee45e49897884410f336f2735113e00d110f86e0e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://84c73656043e620a3ebc7a9dad78446f135c8c5309492451264d79fc29a0870b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c73656043e620a3ebc7a9dad78446f135c8c5309492451264d79fc29a0870b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.830002 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.843542 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:45Z is after 2025-08-24T17:21:41Z"
Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.852467 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.852548 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.852567 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.852594 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.852615 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:45Z","lastTransitionTime":"2025-11-25T09:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.859704 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.874226 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.889306 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.902816 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.920351 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.940535 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.954192 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d8fb345623a1675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:45Z is after 2025-08-24T17:21:41Z" Nov 25 
09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.956232 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.956289 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.956304 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.956325 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.956340 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:45Z","lastTransitionTime":"2025-11-25T09:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 09:04:45.971988 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:45 crc kubenswrapper[4687]: I1125 
09:04:45.998158 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\
\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77325745
3265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"message\\\":\\\"b\\\\nI1125 09:04:19.660645 6361 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI1125 09:04:19.660678 6361 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nI1125 09:04:19.660680 6361 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI1125 09:04:19.660661 6361 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-config-operator/machine-config-daemon]} name:Service_openshift-machine-config-operator/machine-config-daemon_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.43:8798: 10.217.4.43:9001:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {a36f6289-d09f-43f8-8a8a-c9d2cc11eb0d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 09:04:19.660694 6361 base_network_controller_pods.go:916] Annot\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:04:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-p68hx_openshift-ovn-kubernetes(d371271f-84c3-405c-b41f-604a06c1bb71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.012661 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.059453 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.059524 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.059543 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.059566 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.059581 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:46Z","lastTransitionTime":"2025-11-25T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.162168 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.162211 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.162243 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.162266 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.162281 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:46Z","lastTransitionTime":"2025-11-25T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.265345 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.265396 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.265408 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.265427 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.265441 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:46Z","lastTransitionTime":"2025-11-25T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.368437 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.368569 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.368591 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.368615 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.368633 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:46Z","lastTransitionTime":"2025-11-25T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.471314 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.471349 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.471359 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.471376 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.471388 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:46Z","lastTransitionTime":"2025-11-25T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.574138 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.574180 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.574195 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.574215 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.574229 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:46Z","lastTransitionTime":"2025-11-25T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.676653 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.676724 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.676741 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.676762 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.676778 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:46Z","lastTransitionTime":"2025-11-25T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.734010 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.734057 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.734110 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:46 crc kubenswrapper[4687]: E1125 09:04:46.734197 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.734229 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:46 crc kubenswrapper[4687]: E1125 09:04:46.734385 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:46 crc kubenswrapper[4687]: E1125 09:04:46.734411 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:46 crc kubenswrapper[4687]: E1125 09:04:46.734470 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.779627 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.779682 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.779692 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.779709 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.779722 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:46Z","lastTransitionTime":"2025-11-25T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.883550 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.883615 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.883626 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.883647 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.883658 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:46Z","lastTransitionTime":"2025-11-25T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.988923 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.988981 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.989002 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.989033 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:46 crc kubenswrapper[4687]: I1125 09:04:46.989052 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:46Z","lastTransitionTime":"2025-11-25T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.092725 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.092787 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.092798 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.092819 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.092830 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:47Z","lastTransitionTime":"2025-11-25T09:04:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.196555 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.196618 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.196629 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.196652 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.196666 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:47Z","lastTransitionTime":"2025-11-25T09:04:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.298928 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.298980 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.298994 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.299014 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.299027 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:47Z","lastTransitionTime":"2025-11-25T09:04:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.402443 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.402490 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.402521 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.402540 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.402552 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:47Z","lastTransitionTime":"2025-11-25T09:04:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.505258 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.505334 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.505356 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.505379 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.505395 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:47Z","lastTransitionTime":"2025-11-25T09:04:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.609139 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.609215 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.609236 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.609261 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.609279 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:47Z","lastTransitionTime":"2025-11-25T09:04:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.712587 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.712628 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.712637 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.712652 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.712663 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:47Z","lastTransitionTime":"2025-11-25T09:04:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.735014 4687 scope.go:117] "RemoveContainer" containerID="8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.816756 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.817399 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.817430 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.817469 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.817496 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:47Z","lastTransitionTime":"2025-11-25T09:04:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.921118 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.921206 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.921225 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.921248 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:47 crc kubenswrapper[4687]: I1125 09:04:47.921265 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:47Z","lastTransitionTime":"2025-11-25T09:04:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.024497 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.024624 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.024643 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.024699 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.024719 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:48Z","lastTransitionTime":"2025-11-25T09:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.127844 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.127923 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.127945 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.127980 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.128002 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:48Z","lastTransitionTime":"2025-11-25T09:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.232442 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.232480 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.232520 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.232537 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.232549 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:48Z","lastTransitionTime":"2025-11-25T09:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.260423 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-p68hx_d371271f-84c3-405c-b41f-604a06c1bb71/ovnkube-controller/2.log" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.264211 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerStarted","Data":"b79d16f32d9cc4164ef4e877c4d4ce2c66ab956905e0969302bc5ed18ae02219"} Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.264846 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.284816 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79
572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods 
\\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.304465 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.322650 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d8fb345623a1675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:48Z is after 2025-08-24T17:21:41Z" Nov 25 
09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.335822 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.335862 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.335876 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.335897 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.335916 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:48Z","lastTransitionTime":"2025-11-25T09:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.340740 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.355559 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.381854 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b79d16f32d9cc4164ef4e877c4d4ce2c66ab9569
05e0969302bc5ed18ae02219\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"message\\\":\\\"b\\\\nI1125 09:04:19.660645 6361 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI1125 09:04:19.660678 6361 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nI1125 09:04:19.660680 6361 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI1125 09:04:19.660661 6361 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-config-operator/machine-config-daemon]} name:Service_openshift-machine-config-operator/machine-config-daemon_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.43:8798: 10.217.4.43:9001:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {a36f6289-d09f-43f8-8a8a-c9d2cc11eb0d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 09:04:19.660694 6361 base_network_controller_pods.go:916] 
Annot\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:04:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"
containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.394171 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.418030 4687 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.434772 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd7297dc4adfd4a2e6ea288887f965d19bcf553824c5f7813dfa12052c60d189\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:39Z\\\",\\\"message\\\":\\\"2025-11-25T09:03:53+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_64a906f1-5136-405a-9ddd-1ae4c3a4f964\\\\n2025-11-25T09:03:53+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_64a906f1-5136-405a-9ddd-1ae4c3a4f964 to /host/opt/cni/bin/\\\\n2025-11-25T09:03:54Z [verbose] multus-daemon started\\\\n2025-11-25T09:03:54Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:04:39Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.438776 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.438818 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.438827 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.438842 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.438852 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:48Z","lastTransitionTime":"2025-11-25T09:04:48Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.449623 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.467318 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready 
status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.479425 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,
\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.490773 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.502617 4687 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15c26146-fc78-4141-8156-dbfb4c379668\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fed588c0fe3b1c0c10c1ff1a154c1ae83a483da23e9b38c0b919a2b293a76b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f9867bd6213ab7153f99348331e4fedb1cac3236436c454ab2974ee3fdb1d9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ecdc1a162e491f6c98ecee45e49897884410f336f2735113e00d110f86e0e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\
\\"cri-o://84c73656043e620a3ebc7a9dad78446f135c8c5309492451264d79fc29a0870b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c73656043e620a3ebc7a9dad78446f135c8c5309492451264d79fc29a0870b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.514757 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.525309 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.539213 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:48Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.541339 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.541382 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.541394 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.541411 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.541425 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:48Z","lastTransitionTime":"2025-11-25T09:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.557397 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:04:48 crc kubenswrapper[4687]: E1125 09:04:48.557591 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:05:52.557532987 +0000 UTC m=+147.611172865 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.557716 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:48 crc kubenswrapper[4687]: E1125 09:04:48.557839 4687 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:04:48 crc kubenswrapper[4687]: E1125 09:04:48.558569 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:05:52.558281627 +0000 UTC m=+147.611921385 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.644874 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.644939 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.644955 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.644980 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.644997 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:48Z","lastTransitionTime":"2025-11-25T09:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.659526 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.659583 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.659634 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:48 crc kubenswrapper[4687]: E1125 09:04:48.659759 4687 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:04:48 crc kubenswrapper[4687]: E1125 09:04:48.659793 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:04:48 crc kubenswrapper[4687]: E1125 09:04:48.659797 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:04:48 crc 
kubenswrapper[4687]: E1125 09:04:48.659828 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:04:48 crc kubenswrapper[4687]: E1125 09:04:48.659843 4687 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:04:48 crc kubenswrapper[4687]: E1125 09:04:48.659810 4687 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:04:48 crc kubenswrapper[4687]: E1125 09:04:48.659872 4687 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:04:48 crc kubenswrapper[4687]: E1125 09:04:48.659859 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:05:52.659833434 +0000 UTC m=+147.713473162 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:04:48 crc kubenswrapper[4687]: E1125 09:04:48.659940 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:05:52.659907575 +0000 UTC m=+147.713547293 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:04:48 crc kubenswrapper[4687]: E1125 09:04:48.659953 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:05:52.659947406 +0000 UTC m=+147.713587124 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.734489 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.734638 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:48 crc kubenswrapper[4687]: E1125 09:04:48.734763 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.734831 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.734863 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:48 crc kubenswrapper[4687]: E1125 09:04:48.734939 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:48 crc kubenswrapper[4687]: E1125 09:04:48.735056 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:48 crc kubenswrapper[4687]: E1125 09:04:48.735205 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.747469 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.747556 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.747575 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.747598 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.747616 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:48Z","lastTransitionTime":"2025-11-25T09:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.851232 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.851290 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.851307 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.851330 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:48 crc kubenswrapper[4687]: I1125 09:04:48.851348 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:48Z","lastTransitionTime":"2025-11-25T09:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:48.954324 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:48.954366 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:48.954378 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:48.954395 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:48.954407 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:48Z","lastTransitionTime":"2025-11-25T09:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.058591 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.058633 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.058647 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.058665 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.058677 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:49Z","lastTransitionTime":"2025-11-25T09:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.162544 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.162663 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.162681 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.162733 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.162749 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:49Z","lastTransitionTime":"2025-11-25T09:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.266043 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.266116 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.266127 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.266144 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.266157 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:49Z","lastTransitionTime":"2025-11-25T09:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.269780 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-p68hx_d371271f-84c3-405c-b41f-604a06c1bb71/ovnkube-controller/3.log" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.270649 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-p68hx_d371271f-84c3-405c-b41f-604a06c1bb71/ovnkube-controller/2.log" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.275352 4687 generic.go:334] "Generic (PLEG): container finished" podID="d371271f-84c3-405c-b41f-604a06c1bb71" containerID="b79d16f32d9cc4164ef4e877c4d4ce2c66ab956905e0969302bc5ed18ae02219" exitCode=1 Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.275409 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerDied","Data":"b79d16f32d9cc4164ef4e877c4d4ce2c66ab956905e0969302bc5ed18ae02219"} Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.275469 4687 scope.go:117] "RemoveContainer" containerID="8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.276407 4687 scope.go:117] "RemoveContainer" containerID="b79d16f32d9cc4164ef4e877c4d4ce2c66ab956905e0969302bc5ed18ae02219" Nov 25 09:04:49 crc kubenswrapper[4687]: E1125 09:04:49.276645 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-p68hx_openshift-ovn-kubernetes(d371271f-84c3-405c-b41f-604a06c1bb71)\"" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.300401 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.320923 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.338265 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.353156 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.369570 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.369653 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.369664 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.369686 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.369727 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:49Z","lastTransitionTime":"2025-11-25T09:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.370860 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15c26146-fc78-4141-8156-dbfb4c379668\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fed588c0fe3b1c0c10c1ff1a154c1ae83a483da23e9b38c0b919a2b293a76b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f9867bd6213ab7153f99348331e4fedb1cac3236436c454ab2974ee3fdb1d9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMoun
ts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ecdc1a162e491f6c98ecee45e49897884410f336f2735113e00d110f86e0e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://84c73656043e620a3ebc7a9dad78446f135c8c5309492451264d79fc29a0870b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c73656043e620a3ebc7a9dad78446f135c8c5309492451264d79fc29a0870b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.386433 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.403578 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.419305 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.435662 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.455633 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.471852 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d8fb345623a1675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:49Z is after 2025-08-24T17:21:41Z" Nov 25 
09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.473350 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.473400 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.473415 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.473441 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.473456 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:49Z","lastTransitionTime":"2025-11-25T09:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.483710 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.500990 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.522425 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b79d16f32d9cc4164ef4e877c4d4ce2c66ab9569
05e0969302bc5ed18ae02219\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8abd6908316695147c15b437489d344863752499515f1c18b0cd4072116701ed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"message\\\":\\\"b\\\\nI1125 09:04:19.660645 6361 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI1125 09:04:19.660678 6361 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nI1125 09:04:19.660680 6361 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI1125 09:04:19.660661 6361 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-config-operator/machine-config-daemon]} name:Service_openshift-machine-config-operator/machine-config-daemon_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.43:8798: 10.217.4.43:9001:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {a36f6289-d09f-43f8-8a8a-c9d2cc11eb0d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 09:04:19.660694 6361 base_network_controller_pods.go:916] Annot\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:04:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b79d16f32d9cc4164ef4e877c4d4ce2c66ab956905e0969302bc5ed18ae02219\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:48Z\\\",\\\"message\\\":\\\"4.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:NB_Global Row:map[] Rows:[] Columns:[] Mutations:[{Column:nb_cfg Mutator:+= Value:1}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {6011affd-30a6-4be6-872d-e4cf1ca780cf}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 09:04:48.604083 6749 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI1125 09:04:48.604094 6749 ovn.go:134] Ensuring zone local for Pod openshift-network-node-identity/network-node-identity-vrzqb in node crc\\\\nI1125 09:04:48.604101 6749 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed 
attempt(s)\\\\nI1125 09:04:48.604107 6749 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nF1125 09:04:48.604104 6749 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:04:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"
Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.536144 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.553712 4687 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.570123 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd7297dc4adfd4a2e6ea288887f965d19bcf553824c5f7813dfa12052c60d189\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:39Z\\\",\\\"message\\\":\\\"2025-11-25T09:03:53+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_64a906f1-5136-405a-9ddd-1ae4c3a4f964\\\\n2025-11-25T09:03:53+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_64a906f1-5136-405a-9ddd-1ae4c3a4f964 to /host/opt/cni/bin/\\\\n2025-11-25T09:03:54Z [verbose] multus-daemon started\\\\n2025-11-25T09:03:54Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:04:39Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.575862 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.575896 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.575907 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.575925 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.575943 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:49Z","lastTransitionTime":"2025-11-25T09:04:49Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.678541 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.678576 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.678585 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.678597 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.678606 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:49Z","lastTransitionTime":"2025-11-25T09:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.781126 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.781181 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.781198 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.781221 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.781240 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:49Z","lastTransitionTime":"2025-11-25T09:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.884601 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.884656 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.884671 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.884695 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.884710 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:49Z","lastTransitionTime":"2025-11-25T09:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.987064 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.987121 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.987135 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.987153 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:49 crc kubenswrapper[4687]: I1125 09:04:49.987168 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:49Z","lastTransitionTime":"2025-11-25T09:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.089784 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.089837 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.089848 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.089868 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.089879 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:50Z","lastTransitionTime":"2025-11-25T09:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.195884 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.195931 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.195942 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.195963 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.195975 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:50Z","lastTransitionTime":"2025-11-25T09:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.280630 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-p68hx_d371271f-84c3-405c-b41f-604a06c1bb71/ovnkube-controller/3.log"
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.285147 4687 scope.go:117] "RemoveContainer" containerID="b79d16f32d9cc4164ef4e877c4d4ce2c66ab956905e0969302bc5ed18ae02219"
Nov 25 09:04:50 crc kubenswrapper[4687]: E1125 09:04:50.285416 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-p68hx_openshift-ovn-kubernetes(d371271f-84c3-405c-b41f-604a06c1bb71)\"" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" podUID="d371271f-84c3-405c-b41f-604a06c1bb71"
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.300155 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.300220 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.300238 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.300262 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.300282 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:50Z","lastTransitionTime":"2025-11-25T09:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.306146 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.329986 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:50Z is after 
2025-08-24T17:21:41Z" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.351375 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd7297dc4adfd4a2e6ea288887f965d19bcf553824c5f7813dfa12052c60d189\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:39Z\\\",\\\"message\\\":\\\"2025-11-25T09:03:53+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_64a906f1-5136-405a-9ddd-1ae4c3a4f964\\\\n2025-11-25T09:03:53+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_64a906f1-5136-405a-9ddd-1ae4c3a4f964 to /host/opt/cni/bin/\\\\n2025-11-25T09:03:54Z [verbose] multus-daemon started\\\\n2025-11-25T09:03:54Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:04:39Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.377059 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.395120 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:50Z is after 2025-08-24T17:21:41Z"
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.404165 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.404213 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.404230 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.404253 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.404273 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:50Z","lastTransitionTime":"2025-11-25T09:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.408754 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.424572 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15c26146-fc78-4141-8156-dbfb4c379668\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fed588c0fe3b1c0c10c1ff1a154c1ae83a483da23e9b38c0b919a2b293a76b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f9867bd6213ab7153f99348331e4fedb1cac3236436c454ab2974ee3fdb1d9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ecdc1a162e491f6c98ecee45e49897884410f336f2735113e00d110f86e0e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://84c73656043e620a3ebc7a9dad78446f135c8c5309492451264d79fc29a0870b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c73656043e620a3ebc7a9dad78446f135c8c5309492451264d79fc29a0870b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.442121 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.459455 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.477535 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.491635 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.503963 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\
\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] 
MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.507874 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.507930 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.507946 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.507971 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" 
Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.507992 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:50Z","lastTransitionTime":"2025-11-25T09:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.517131 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.527346 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d8fb345623a1675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.542398 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.565209 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b79d16f32d9cc4164ef4e877c4d4ce2c66ab956905e0969302bc5ed18ae02219\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b79d16f32d9cc4164ef4e877c4d4ce2c66ab956905e0969302bc5ed18ae02219\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:48Z\\\",\\\"message\\\":\\\"4.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:NB_Global Row:map[] Rows:[] Columns:[] Mutations:[{Column:nb_cfg Mutator:+= Value:1}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {6011affd-30a6-4be6-872d-e4cf1ca780cf}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 09:04:48.604083 6749 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI1125 09:04:48.604094 6749 ovn.go:134] Ensuring zone local for Pod openshift-network-node-identity/network-node-identity-vrzqb in node crc\\\\nI1125 09:04:48.604101 6749 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI1125 09:04:48.604107 6749 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nF1125 09:04:48.604104 6749 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:04:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-p68hx_openshift-ovn-kubernetes(d371271f-84c3-405c-b41f-604a06c1bb71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.577032 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.610420 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.610462 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.610475 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.610491 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.610529 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:50Z","lastTransitionTime":"2025-11-25T09:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.713623 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.713992 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.714020 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.714043 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.714061 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:50Z","lastTransitionTime":"2025-11-25T09:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.734456 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.734484 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.734584 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.734544 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:50 crc kubenswrapper[4687]: E1125 09:04:50.734691 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:50 crc kubenswrapper[4687]: E1125 09:04:50.735015 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:04:50 crc kubenswrapper[4687]: E1125 09:04:50.735173 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:50 crc kubenswrapper[4687]: E1125 09:04:50.735299 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.817821 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.817919 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.817931 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.817979 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.817994 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:50Z","lastTransitionTime":"2025-11-25T09:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.921557 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.921622 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.921641 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.921711 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:50 crc kubenswrapper[4687]: I1125 09:04:50.921736 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:50Z","lastTransitionTime":"2025-11-25T09:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.025112 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.025167 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.025183 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.025207 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.025224 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:51Z","lastTransitionTime":"2025-11-25T09:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.128211 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.128260 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.128271 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.128287 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.128300 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:51Z","lastTransitionTime":"2025-11-25T09:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.230996 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.231037 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.231049 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.231064 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.231074 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:51Z","lastTransitionTime":"2025-11-25T09:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.334553 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.334678 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.334698 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.334730 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.334753 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:51Z","lastTransitionTime":"2025-11-25T09:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.439743 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.439826 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.439845 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.439876 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.439899 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:51Z","lastTransitionTime":"2025-11-25T09:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.542792 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.542834 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.542842 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.542857 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.542866 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:51Z","lastTransitionTime":"2025-11-25T09:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.645267 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.645356 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.645379 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.645411 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.645435 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:51Z","lastTransitionTime":"2025-11-25T09:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.748947 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.749035 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.749060 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.749092 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.749115 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:51Z","lastTransitionTime":"2025-11-25T09:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.760773 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.852702 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.852749 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.852761 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.852775 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.852784 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:51Z","lastTransitionTime":"2025-11-25T09:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.955566 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.955618 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.955630 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.955653 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:51 crc kubenswrapper[4687]: I1125 09:04:51.955666 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:51Z","lastTransitionTime":"2025-11-25T09:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.058758 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.058847 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.058868 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.058894 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.058912 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:52Z","lastTransitionTime":"2025-11-25T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.161591 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.161668 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.161690 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.161720 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.161741 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:52Z","lastTransitionTime":"2025-11-25T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.264293 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.264364 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.264385 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.264411 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.264435 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:52Z","lastTransitionTime":"2025-11-25T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.367480 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.367705 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.367784 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.367823 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.367907 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:52Z","lastTransitionTime":"2025-11-25T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.471567 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.471636 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.471660 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.471692 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.471714 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:52Z","lastTransitionTime":"2025-11-25T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.575234 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.575287 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.575304 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.575326 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.575341 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:52Z","lastTransitionTime":"2025-11-25T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.678441 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.678533 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.678551 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.678573 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.678589 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:52Z","lastTransitionTime":"2025-11-25T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.734593 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.734654 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.734738 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:52 crc kubenswrapper[4687]: E1125 09:04:52.734755 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.734781 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:52 crc kubenswrapper[4687]: E1125 09:04:52.734922 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:52 crc kubenswrapper[4687]: E1125 09:04:52.735080 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:52 crc kubenswrapper[4687]: E1125 09:04:52.735200 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.781446 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.781542 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.781563 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.781589 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.781611 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:52Z","lastTransitionTime":"2025-11-25T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.874337 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.874403 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.874419 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.874442 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.874459 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:52Z","lastTransitionTime":"2025-11-25T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:52 crc kubenswrapper[4687]: E1125 09:04:52.891695 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:52Z is after 2025-08-24T17:21:41Z"
Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.896655 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.896731 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.896749 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.896774 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.896792 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:52Z","lastTransitionTime":"2025-11-25T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:04:52 crc kubenswrapper[4687]: E1125 09:04:52.911217 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:52Z is after 2025-08-24T17:21:41Z"
Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.915295 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.915358 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.915375 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.915399 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.915416 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:52Z","lastTransitionTime":"2025-11-25T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:04:52 crc kubenswrapper[4687]: E1125 09:04:52.933264 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.937370 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.937430 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.937445 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.937467 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.937483 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:52Z","lastTransitionTime":"2025-11-25T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:52 crc kubenswrapper[4687]: E1125 09:04:52.951409 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.955056 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.955082 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.955091 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.955103 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.955112 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:52Z","lastTransitionTime":"2025-11-25T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:52 crc kubenswrapper[4687]: E1125 09:04:52.968369 4687 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"59433806-9cd9-44e7-8e27-d03eb8a2fcda\\\",\\\"systemUUID\\\":\\\"3d8948be-6f27-4904-9fe7-1878681451c2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:52 crc kubenswrapper[4687]: E1125 09:04:52.968522 4687 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.970072 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.970096 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.970106 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.970122 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:52 crc kubenswrapper[4687]: I1125 09:04:52.970133 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:52Z","lastTransitionTime":"2025-11-25T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.073408 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.073462 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.073476 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.073502 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.073532 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:53Z","lastTransitionTime":"2025-11-25T09:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.176562 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.176602 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.176619 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.176638 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.176650 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:53Z","lastTransitionTime":"2025-11-25T09:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.279797 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.279842 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.279853 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.279873 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.279885 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:53Z","lastTransitionTime":"2025-11-25T09:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.383372 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.383509 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.383567 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.383595 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.383613 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:53Z","lastTransitionTime":"2025-11-25T09:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.485965 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.486025 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.486043 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.486064 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.486084 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:53Z","lastTransitionTime":"2025-11-25T09:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.588851 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.588892 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.588903 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.588917 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.588925 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:53Z","lastTransitionTime":"2025-11-25T09:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.691083 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.691116 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.691126 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.691138 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.691146 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:53Z","lastTransitionTime":"2025-11-25T09:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.750045 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.793890 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.793961 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.793979 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.794003 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.794023 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:53Z","lastTransitionTime":"2025-11-25T09:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.897237 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.897310 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.897333 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.897362 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:53 crc kubenswrapper[4687]: I1125 09:04:53.897384 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:53Z","lastTransitionTime":"2025-11-25T09:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.001285 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.001346 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.001363 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.001387 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.001404 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:54Z","lastTransitionTime":"2025-11-25T09:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.104575 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.104624 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.104665 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.104686 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.104701 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:54Z","lastTransitionTime":"2025-11-25T09:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.208122 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.208201 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.208223 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.208250 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.208269 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:54Z","lastTransitionTime":"2025-11-25T09:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.310607 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.310651 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.310666 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.310685 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.310699 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:54Z","lastTransitionTime":"2025-11-25T09:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.413858 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.413976 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.413996 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.414020 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.414038 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:54Z","lastTransitionTime":"2025-11-25T09:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.517386 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.517452 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.517470 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.517497 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.517546 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:54Z","lastTransitionTime":"2025-11-25T09:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.621664 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.621770 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.621796 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.621882 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.621908 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:54Z","lastTransitionTime":"2025-11-25T09:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.723878 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.723929 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.723937 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.723949 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.723975 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:54Z","lastTransitionTime":"2025-11-25T09:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.734558 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.734622 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.734653 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.734585 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:54 crc kubenswrapper[4687]: E1125 09:04:54.734683 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:04:54 crc kubenswrapper[4687]: E1125 09:04:54.734775 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:54 crc kubenswrapper[4687]: E1125 09:04:54.734811 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:54 crc kubenswrapper[4687]: E1125 09:04:54.734891 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.826840 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.826871 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.826884 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.826898 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.826906 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:54Z","lastTransitionTime":"2025-11-25T09:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.929642 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.929711 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.929731 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.929755 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:54 crc kubenswrapper[4687]: I1125 09:04:54.929773 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:54Z","lastTransitionTime":"2025-11-25T09:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.032652 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.032716 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.032735 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.032759 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.032777 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:55Z","lastTransitionTime":"2025-11-25T09:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.135621 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.135700 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.135723 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.135754 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.135784 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:55Z","lastTransitionTime":"2025-11-25T09:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.239691 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.239768 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.239786 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.239812 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.239829 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:55Z","lastTransitionTime":"2025-11-25T09:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.343297 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.343381 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.343404 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.343435 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.343457 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:55Z","lastTransitionTime":"2025-11-25T09:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.447114 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.447202 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.447235 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.447268 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.447289 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:55Z","lastTransitionTime":"2025-11-25T09:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.550702 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.550779 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.550799 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.550824 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.550846 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:55Z","lastTransitionTime":"2025-11-25T09:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.653954 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.654036 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.654059 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.654090 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.654116 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:55Z","lastTransitionTime":"2025-11-25T09:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.757482 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:03:46Z\\\",\\\"message\\\":\\\"file observer\\\\nW1125 09:03:45.627027 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:03:45.627241 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:03:45.628350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-570829990/tls.crt::/tmp/serving-cert-570829990/tls.key\\\\\\\"\\\\nI1125 09:03:46.054227 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:03:46.057065 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:03:46.057090 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:03:46.057115 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:03:46.057122 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:03:46.069544 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:03:46.069575 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069582 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:03:46.069588 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:03:46.069592 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:03:46.069596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:03:46.069599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:03:46.069807 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:03:46.071016 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.758062 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.758112 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.758136 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.758171 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.758192 4687 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:55Z","lastTransitionTime":"2025-11-25T09:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.778298 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ab5679dc320d70d0c9b74e1be059570954c561f821e4d1a13da50b42df9204a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b24158824ee52296d6ff5a1e6377eb27998066dfb8b105bd59143ffaa6bfc7f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.793487 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9c3b7b-52fe-4bc4-ae61-2ab61dc1ac0d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1db0dfb1f45de10d83b0cf1bc0fb955f0fb038b5e312fad92e0ea1d929a9da72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d8fb345623a1675a270fa418e542f6d6379ba05dc936f771a117a80e0390d70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h78z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:03Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xg6dn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.809315 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0d37b1c-f114-4cc7-afa6-93377124bd2b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b0936b99680485c59502e5c0a76d33bc48294cfd52e3c913a0120b62a856a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f90f83416838f8073253a8a9f36244bc6810976a24dd836a7e4a1f1d685e60d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f90f83416838f8073253a8a9f36244bc6810976a24dd836a7e4a1f1d685e60d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.843412 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"38ce12c5-20dc-4eb6-a834-340c13439593\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee0d0493d08d4eaf2f9d0e476b45c160c958469c183757219b787f09ab152cca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3ceff46ab6cff536f160ccbc3050e88810cf658c0dd96fc8fcd3be619a87ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ddb66748f9c9e61538d9b8fa023f3842558557989f7c54e5a713d1a66407165\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\
",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8debcf38910327db36439311e2eb60d65f8635a265251e0000868ca0ac08a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b815780087126b2c8d8b8ec45b673e753d8b1563a96e0dd9381cffa9e49f331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4915aa39d3cae307d0bbdcec35149536659bff2d627bbeb3b3efcf4309ed922c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4915aa39d3cae307d0bbdcec35149536659bff2d627bbeb3b3efcf4309ed922c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbb106924fdb53a6998ffb913dafedfe33521fe69f4b401022f0c58f979691ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-v
ars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbb106924fdb53a6998ffb913dafedfe33521fe69f4b401022f0c58f979691ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fc9e1af593911bf14a2c855a6733b4bb4d22d52e257560aed8b84f66818e95ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc9e1af593911bf14a2c855a6733b4bb4d22d52e257560aed8b84f66818e95ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.860709 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.860754 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.860764 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.860783 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.860796 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:55Z","lastTransitionTime":"2025-11-25T09:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.863403 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f9afc4f5f6ddfdbbe7bb3fde43cf3ef3415f9e83b0527ae057d7e493ff43890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.889241 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d371271f-84c3-405c-b41f-604a06c1bb71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b79d16f32d9cc4164ef4e877c4d4ce2c66ab956905e0969302bc5ed18ae02219\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b79d16f32d9cc4164ef4e877c4d4ce2c66ab956905e0969302bc5ed18ae02219\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:48Z\\\",\\\"message\\\":\\\"4.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:NB_Global Row:map[] Rows:[] Columns:[] Mutations:[{Column:nb_cfg Mutator:+= Value:1}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {6011affd-30a6-4be6-872d-e4cf1ca780cf}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 09:04:48.604083 6749 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI1125 09:04:48.604094 6749 ovn.go:134] Ensuring zone local for Pod openshift-network-node-identity/network-node-identity-vrzqb in node crc\\\\nI1125 09:04:48.604101 6749 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI1125 09:04:48.604107 6749 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nF1125 09:04:48.604104 6749 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:04:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-p68hx_openshift-ovn-kubernetes(d371271f-84c3-405c-b41f-604a06c1bb71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9jb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-p68hx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.905249 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cscrb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0433643a-5ed9-485b-a788-51de4a92f461\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9htjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:04:04Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cscrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.920023 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://654bb9115ab9fc3e71973c6c789dd6a698596ae7e9ecbd3e1cc64f9c009f46f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xlsnj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vcqct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.939403 4687 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9736e597-ba61-47a5-b1e2-02b151c5cac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6deb9fb2338309b8853ca3e8d21b898242891dec946ed138ae7ed5d0c473c8ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23e28eb9789eaf63524e2d17c8b60f7db064c8c74b93e62ece8bf24bc16fde8d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7bf79eb996000c81ecb5137be22ef7e0c399759efb4dbd375813a4a1368c4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://365a2b429bf7b2d3cad0bb2d7e507875bf7312c1cbaa69e0a8665a1717927fc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4cc6b0ff00fcf62abab4eb8dab6f4ba5984001021dbf281cf0a8ab9928266b29\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35b732ddb432cd864a376ff9b76b7ee8996d7fa4b49c5d2c6ee92c976f364b77\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2091b49523a47f9c07629e685c88f539ca2968944cb0e78750292d630772df7f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:04:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8ncmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-g8h9r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.955379 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wlzrb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e7c96e4-c7fa-466f-b0b6-495612ed71f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd7297dc4adfd4a2e6ea288887f965d19bcf553824c5f7813dfa12052c60d189\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:04:39Z\\\",\\\"message\\\":\\\"2025-11-25T09:03:53+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_64a906f1-5136-405a-9ddd-1ae4c3a4f964\\\\n2025-11-25T09:03:53+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_64a906f1-5136-405a-9ddd-1ae4c3a4f964 to /host/opt/cni/bin/\\\\n2025-11-25T09:03:54Z [verbose] multus-daemon started\\\\n2025-11-25T09:03:54Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:04:39Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:04:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cprtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wlzrb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.963821 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.963895 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.963919 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.963947 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.963970 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:55Z","lastTransitionTime":"2025-11-25T09:04:55Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.969463 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.984601 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9zmf6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37ae5d60-327b-4f2d-83c3-bd775960a7ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5564bbd06f4b5875d55ab9b803a6981c9f3b48eade552831b6ca6533d64caeb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q4r75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9zmf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:55 crc kubenswrapper[4687]: I1125 09:04:55.995234 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pjj4b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c8f16526-df37-4a3b-9fc6-010c96296946\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5eeaed17dd204bd6f8917c0b9fb2db31c02a9df79a184dce8f481bbd039354f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbqmj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pjj4b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.007567 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15c26146-fc78-4141-8156-dbfb4c379668\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:04:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fed588c0fe3b1c0c10c1ff1a154c1ae83a483da23e9b38c0b919a2b293a76b21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f9867bd6213ab7153f99348331e4fedb1cac3236436c454ab2974ee3fdb1d9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ecdc1a162e491f6c98ecee45e49897884410f336f2735113e00d110f86e0e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://84c73656043e620a3ebc7a9dad78446f135c8c5309492451264d79fc29a0870b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c73656043e620a3ebc7a9dad78446f135c8c5309492451264d79fc29a0870b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.022977 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"093c8062-c80c-41ae-8ab1-a8e14ba3c90b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://536c3f9073f95b37893353f9e1f6b0c72cb16fc121d925cb4cd9f2e9823705a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d89588390e3e7f0c6709525bc16570ab1bde16089c15560965eb80efa06b18c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa8f6d8e6cd3f946411b8d646c094d23f7af525e40d25538cc82eaff9875a1d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:03:25Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.042166 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.052763 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:47Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f0fc4b61539907c0628c0cd6d318f2165c4f359d161986049a3048bccb95909\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.065037 4687 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:03:44Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:04:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.066665 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.066716 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.066724 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.066737 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.066746 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:56Z","lastTransitionTime":"2025-11-25T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.169258 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.169290 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.169299 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.169311 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.169320 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:56Z","lastTransitionTime":"2025-11-25T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.271931 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.271993 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.272011 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.272035 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.272054 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:56Z","lastTransitionTime":"2025-11-25T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.374093 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.374163 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.374187 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.374219 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.374242 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:56Z","lastTransitionTime":"2025-11-25T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.477550 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.477607 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.477623 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.477643 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.477661 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:56Z","lastTransitionTime":"2025-11-25T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.579914 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.579971 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.579987 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.580006 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.580018 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:56Z","lastTransitionTime":"2025-11-25T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.682684 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.682724 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.682737 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.682752 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.682765 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:56Z","lastTransitionTime":"2025-11-25T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.734750 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.734904 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:56 crc kubenswrapper[4687]: E1125 09:04:56.734940 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:56 crc kubenswrapper[4687]: E1125 09:04:56.735131 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.734750 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.734756 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:56 crc kubenswrapper[4687]: E1125 09:04:56.735251 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:56 crc kubenswrapper[4687]: E1125 09:04:56.735336 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.785110 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.785173 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.785192 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.785216 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.785233 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:56Z","lastTransitionTime":"2025-11-25T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.887860 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.888330 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.888618 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.888857 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.889065 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:56Z","lastTransitionTime":"2025-11-25T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.991715 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.991742 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.991772 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.991787 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:56 crc kubenswrapper[4687]: I1125 09:04:56.991796 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:56Z","lastTransitionTime":"2025-11-25T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.095151 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.095231 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.095254 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.095284 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.095302 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:57Z","lastTransitionTime":"2025-11-25T09:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.201638 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.201687 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.201696 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.201721 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.201738 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:57Z","lastTransitionTime":"2025-11-25T09:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.304048 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.304131 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.304163 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.304192 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.304210 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:57Z","lastTransitionTime":"2025-11-25T09:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.408117 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.408164 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.408179 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.408198 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.408213 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:57Z","lastTransitionTime":"2025-11-25T09:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.511342 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.511404 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.511413 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.511427 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.511436 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:57Z","lastTransitionTime":"2025-11-25T09:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.619788 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.620529 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.620567 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.620598 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.620619 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:57Z","lastTransitionTime":"2025-11-25T09:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.723132 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.723186 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.723196 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.723211 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.723220 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:57Z","lastTransitionTime":"2025-11-25T09:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.826716 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.826798 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.826822 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.826850 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.826873 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:57Z","lastTransitionTime":"2025-11-25T09:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.930341 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.930426 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.930458 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.930487 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:57 crc kubenswrapper[4687]: I1125 09:04:57.930564 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:57Z","lastTransitionTime":"2025-11-25T09:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.033799 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.033847 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.033860 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.033879 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.033892 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:58Z","lastTransitionTime":"2025-11-25T09:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.136936 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.137092 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.137120 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.137150 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.137171 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:58Z","lastTransitionTime":"2025-11-25T09:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.239902 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.239962 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.239980 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.240004 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.240021 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:58Z","lastTransitionTime":"2025-11-25T09:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.342861 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.342925 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.342943 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.342967 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.342985 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:58Z","lastTransitionTime":"2025-11-25T09:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.446101 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.446181 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.446203 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.446231 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.446252 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:58Z","lastTransitionTime":"2025-11-25T09:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.549394 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.549446 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.549460 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.549480 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.549498 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:58Z","lastTransitionTime":"2025-11-25T09:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.653222 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.653272 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.653284 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.653298 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.653309 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:58Z","lastTransitionTime":"2025-11-25T09:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.734248 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.734274 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.734315 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:04:58 crc kubenswrapper[4687]: E1125 09:04:58.734379 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.734433 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:04:58 crc kubenswrapper[4687]: E1125 09:04:58.734484 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:04:58 crc kubenswrapper[4687]: E1125 09:04:58.734655 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:04:58 crc kubenswrapper[4687]: E1125 09:04:58.734767 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.756176 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.756245 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.756271 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.756299 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.756321 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:58Z","lastTransitionTime":"2025-11-25T09:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.859786 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.859901 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.859916 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.859936 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.859951 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:58Z","lastTransitionTime":"2025-11-25T09:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.962402 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.962458 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.962475 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.962497 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:58 crc kubenswrapper[4687]: I1125 09:04:58.962554 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:58Z","lastTransitionTime":"2025-11-25T09:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.064985 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.065093 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.065110 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.065138 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.065156 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:59Z","lastTransitionTime":"2025-11-25T09:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.167760 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.167829 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.167850 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.167878 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.167900 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:59Z","lastTransitionTime":"2025-11-25T09:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.270471 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.270672 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.270694 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.270717 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.270736 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:59Z","lastTransitionTime":"2025-11-25T09:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.373107 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.373167 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.373184 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.373208 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.373229 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:59Z","lastTransitionTime":"2025-11-25T09:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.475779 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.475826 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.475837 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.475855 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.475869 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:59Z","lastTransitionTime":"2025-11-25T09:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.578421 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.578466 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.578481 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.578539 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.578564 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:59Z","lastTransitionTime":"2025-11-25T09:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.688089 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.688141 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.688153 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.688170 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.688183 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:59Z","lastTransitionTime":"2025-11-25T09:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.790621 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.790708 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.790729 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.790756 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.790775 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:59Z","lastTransitionTime":"2025-11-25T09:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.893659 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.893960 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.894048 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.894134 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.894221 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:59Z","lastTransitionTime":"2025-11-25T09:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.997850 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.997917 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.997938 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.997967 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:04:59 crc kubenswrapper[4687]: I1125 09:04:59.997989 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:04:59Z","lastTransitionTime":"2025-11-25T09:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.103120 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.103198 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.103212 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.103227 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.103242 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:00Z","lastTransitionTime":"2025-11-25T09:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.207261 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.207332 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.207350 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.207382 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.207405 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:00Z","lastTransitionTime":"2025-11-25T09:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.310822 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.310888 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.310912 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.310942 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.310963 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:00Z","lastTransitionTime":"2025-11-25T09:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.413588 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.413639 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.413657 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.413677 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.413693 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:00Z","lastTransitionTime":"2025-11-25T09:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.517236 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.517744 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.517935 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.518124 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.518316 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:00Z","lastTransitionTime":"2025-11-25T09:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.621222 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.622281 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.622453 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.622695 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.622875 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:00Z","lastTransitionTime":"2025-11-25T09:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.725553 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.725587 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.725598 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.725613 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.725625 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:00Z","lastTransitionTime":"2025-11-25T09:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.733697 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.733765 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.734070 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.734151 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:05:00 crc kubenswrapper[4687]: E1125 09:05:00.734295 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.734404 4687 scope.go:117] "RemoveContainer" containerID="b79d16f32d9cc4164ef4e877c4d4ce2c66ab956905e0969302bc5ed18ae02219" Nov 25 09:05:00 crc kubenswrapper[4687]: E1125 09:05:00.734577 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-p68hx_openshift-ovn-kubernetes(d371271f-84c3-405c-b41f-604a06c1bb71)\"" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" Nov 25 09:05:00 crc kubenswrapper[4687]: E1125 09:05:00.734679 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:05:00 crc kubenswrapper[4687]: E1125 09:05:00.734821 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:05:00 crc kubenswrapper[4687]: E1125 09:05:00.734929 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.828243 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.828273 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.828280 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.828294 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.828302 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:00Z","lastTransitionTime":"2025-11-25T09:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.930713 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.930775 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.930797 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.930821 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:05:00 crc kubenswrapper[4687]: I1125 09:05:00.930839 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:00Z","lastTransitionTime":"2025-11-25T09:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.033755 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.033800 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.033811 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.033828 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.033841 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:01Z","lastTransitionTime":"2025-11-25T09:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.137076 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.137145 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.137170 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.137197 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.137219 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:01Z","lastTransitionTime":"2025-11-25T09:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.240550 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.240593 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.240635 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.240654 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.240667 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:01Z","lastTransitionTime":"2025-11-25T09:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.344128 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.344196 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.344209 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.344231 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.344247 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:01Z","lastTransitionTime":"2025-11-25T09:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.446785 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.446863 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.446887 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.446917 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.446941 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:01Z","lastTransitionTime":"2025-11-25T09:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.550359 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.550438 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.550461 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.550495 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.550608 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:01Z","lastTransitionTime":"2025-11-25T09:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.653637 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.653714 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.653732 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.653759 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.653777 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:01Z","lastTransitionTime":"2025-11-25T09:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.756818 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.756881 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.756899 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.756920 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.756938 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:01Z","lastTransitionTime":"2025-11-25T09:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.859328 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.859393 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.859410 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.859434 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.859455 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:01Z","lastTransitionTime":"2025-11-25T09:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.962212 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.962264 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.962276 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.962292 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:05:01 crc kubenswrapper[4687]: I1125 09:05:01.962303 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:01Z","lastTransitionTime":"2025-11-25T09:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.065329 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.065393 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.065412 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.065437 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.065454 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:02Z","lastTransitionTime":"2025-11-25T09:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.169048 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.169135 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.169157 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.169185 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.169208 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:02Z","lastTransitionTime":"2025-11-25T09:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.273325 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.273399 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.273419 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.273448 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.273471 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:02Z","lastTransitionTime":"2025-11-25T09:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.377602 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.377672 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.377691 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.377722 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.377743 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:02Z","lastTransitionTime":"2025-11-25T09:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.481054 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.481095 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.481105 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.481127 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.481139 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:02Z","lastTransitionTime":"2025-11-25T09:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.583191 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.583235 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.583246 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.583263 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.583276 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:02Z","lastTransitionTime":"2025-11-25T09:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.686362 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.686413 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.686427 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.686443 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.686459 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:02Z","lastTransitionTime":"2025-11-25T09:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.734675 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.734743 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:05:02 crc kubenswrapper[4687]: E1125 09:05:02.734856 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.734921 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:05:02 crc kubenswrapper[4687]: E1125 09:05:02.735007 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:05:02 crc kubenswrapper[4687]: E1125 09:05:02.735106 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.735107 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb"
Nov 25 09:05:02 crc kubenswrapper[4687]: E1125 09:05:02.735264 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.790555 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.790614 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.790627 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.790653 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.790666 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:02Z","lastTransitionTime":"2025-11-25T09:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.894221 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.894290 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.894314 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.894345 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.894368 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:02Z","lastTransitionTime":"2025-11-25T09:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.997628 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.997669 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.997678 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.997692 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:05:02 crc kubenswrapper[4687]: I1125 09:05:02.997701 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:02Z","lastTransitionTime":"2025-11-25T09:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.100349 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.100387 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.100399 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.100418 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.100430 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:03Z","lastTransitionTime":"2025-11-25T09:05:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.171695 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.171738 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.171747 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.171761 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.171770 4687 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:05:03Z","lastTransitionTime":"2025-11-25T09:05:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.218958 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-cv99j"]
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.219380 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cv99j"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.222088 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.222328 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.223111 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.223249 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.233820 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=10.233800104 podStartE2EDuration="10.233800104s" podCreationTimestamp="2025-11-25 09:04:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:03.233624059 +0000 UTC m=+98.287263777" watchObservedRunningTime="2025-11-25 09:05:03.233800104 +0000 UTC m=+98.287439822"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.259410 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=12.259392562 podStartE2EDuration="12.259392562s" podCreationTimestamp="2025-11-25 09:04:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:03.25897728 +0000 UTC m=+98.312617008" watchObservedRunningTime="2025-11-25 09:05:03.259392562 +0000 UTC m=+98.313032280"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.314754 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podStartSLOduration=74.314734536 podStartE2EDuration="1m14.314734536s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:03.314577082 +0000 UTC m=+98.368216800" watchObservedRunningTime="2025-11-25 09:05:03.314734536 +0000 UTC m=+98.368374254"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.328099 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3c34c38e-590d-4e4c-98a2-a0e4eebbf37e-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-cv99j\" (UID: \"3c34c38e-590d-4e4c-98a2-a0e4eebbf37e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cv99j"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.328188 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/3c34c38e-590d-4e4c-98a2-a0e4eebbf37e-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-cv99j\" (UID: \"3c34c38e-590d-4e4c-98a2-a0e4eebbf37e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cv99j"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.328295 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3c34c38e-590d-4e4c-98a2-a0e4eebbf37e-service-ca\") pod \"cluster-version-operator-5c965bbfc6-cv99j\" (UID: \"3c34c38e-590d-4e4c-98a2-a0e4eebbf37e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cv99j"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.328336 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/3c34c38e-590d-4e4c-98a2-a0e4eebbf37e-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-cv99j\" (UID: \"3c34c38e-590d-4e4c-98a2-a0e4eebbf37e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cv99j"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.328355 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3c34c38e-590d-4e4c-98a2-a0e4eebbf37e-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-cv99j\" (UID: \"3c34c38e-590d-4e4c-98a2-a0e4eebbf37e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cv99j"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.332043 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-g8h9r" podStartSLOduration=74.332012573 podStartE2EDuration="1m14.332012573s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:03.331983752 +0000 UTC m=+98.385623480" watchObservedRunningTime="2025-11-25 09:05:03.332012573 +0000 UTC m=+98.385652291"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.345666 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-wlzrb" podStartSLOduration=74.345643104 podStartE2EDuration="1m14.345643104s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:03.344920055 +0000 UTC m=+98.398559783" watchObservedRunningTime="2025-11-25 09:05:03.345643104 +0000 UTC m=+98.399282842"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.356390 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-pjj4b" podStartSLOduration=74.356372018 podStartE2EDuration="1m14.356372018s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:03.356311226 +0000 UTC m=+98.409950944" watchObservedRunningTime="2025-11-25 09:05:03.356372018 +0000 UTC m=+98.410011726"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.369684 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=42.369662549 podStartE2EDuration="42.369662549s" podCreationTimestamp="2025-11-25 09:04:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:03.369632438 +0000 UTC m=+98.423272186" watchObservedRunningTime="2025-11-25 09:05:03.369662549 +0000 UTC m=+98.423302267"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.387401 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=78.387372558 podStartE2EDuration="1m18.387372558s" podCreationTimestamp="2025-11-25 09:03:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:03.386835524 +0000 UTC m=+98.440475232" watchObservedRunningTime="2025-11-25 09:05:03.387372558 +0000 UTC m=+98.441012286"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.428994 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3c34c38e-590d-4e4c-98a2-a0e4eebbf37e-service-ca\") pod \"cluster-version-operator-5c965bbfc6-cv99j\" (UID: \"3c34c38e-590d-4e4c-98a2-a0e4eebbf37e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cv99j"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.429046 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/3c34c38e-590d-4e4c-98a2-a0e4eebbf37e-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-cv99j\" (UID: \"3c34c38e-590d-4e4c-98a2-a0e4eebbf37e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cv99j"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.429070 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3c34c38e-590d-4e4c-98a2-a0e4eebbf37e-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-cv99j\" (UID: \"3c34c38e-590d-4e4c-98a2-a0e4eebbf37e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cv99j"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.429123 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3c34c38e-590d-4e4c-98a2-a0e4eebbf37e-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-cv99j\" (UID: \"3c34c38e-590d-4e4c-98a2-a0e4eebbf37e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cv99j"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.429136 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/3c34c38e-590d-4e4c-98a2-a0e4eebbf37e-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-cv99j\" (UID: \"3c34c38e-590d-4e4c-98a2-a0e4eebbf37e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cv99j"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.429203 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/3c34c38e-590d-4e4c-98a2-a0e4eebbf37e-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-cv99j\" (UID: \"3c34c38e-590d-4e4c-98a2-a0e4eebbf37e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cv99j"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.429268 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/3c34c38e-590d-4e4c-98a2-a0e4eebbf37e-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-cv99j\" (UID: \"3c34c38e-590d-4e4c-98a2-a0e4eebbf37e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cv99j"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.430151 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3c34c38e-590d-4e4c-98a2-a0e4eebbf37e-service-ca\") pod \"cluster-version-operator-5c965bbfc6-cv99j\" (UID: \"3c34c38e-590d-4e4c-98a2-a0e4eebbf37e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cv99j"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.434910 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3c34c38e-590d-4e4c-98a2-a0e4eebbf37e-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-cv99j\" (UID: \"3c34c38e-590d-4e4c-98a2-a0e4eebbf37e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cv99j"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.464292 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3c34c38e-590d-4e4c-98a2-a0e4eebbf37e-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-cv99j\" (UID: \"3c34c38e-590d-4e4c-98a2-a0e4eebbf37e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cv99j"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.470485 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-9zmf6" podStartSLOduration=74.470464986 podStartE2EDuration="1m14.470464986s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:03.469729077 +0000 UTC m=+98.523368795" watchObservedRunningTime="2025-11-25 09:05:03.470464986 +0000 UTC m=+98.524104704"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.494714 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=77.494695888 podStartE2EDuration="1m17.494695888s" podCreationTimestamp="2025-11-25 09:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:03.494209555 +0000 UTC m=+98.547849293" watchObservedRunningTime="2025-11-25 09:05:03.494695888 +0000 UTC m=+98.548335606"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.536714 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cv99j"
Nov 25 09:05:03 crc kubenswrapper[4687]: I1125 09:05:03.538190 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xg6dn" podStartSLOduration=73.53818153899999 podStartE2EDuration="1m13.538181539s" podCreationTimestamp="2025-11-25 09:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:03.527613639 +0000 UTC m=+98.581253357" watchObservedRunningTime="2025-11-25 09:05:03.538181539 +0000 UTC m=+98.591821257"
Nov 25 09:05:04 crc kubenswrapper[4687]: I1125 09:05:04.336233 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cv99j" event={"ID":"3c34c38e-590d-4e4c-98a2-a0e4eebbf37e","Type":"ContainerStarted","Data":"30f811c1d51d5167f9e34e28c59d556d0705dee12cf691b5346825b9d72dd8e9"}
Nov 25 09:05:04 crc kubenswrapper[4687]: I1125 09:05:04.336562 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cv99j" event={"ID":"3c34c38e-590d-4e4c-98a2-a0e4eebbf37e","Type":"ContainerStarted","Data":"7a97fbb64667c8a8630ae5dc1b19d645c0bbe13886d4943fc2eea85a3f834e61"}
Nov 25 09:05:04 crc kubenswrapper[4687]: I1125 09:05:04.352794 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cv99j" podStartSLOduration=75.352770154 podStartE2EDuration="1m15.352770154s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:04.350069962 +0000 UTC m=+99.403709680" watchObservedRunningTime="2025-11-25 09:05:04.352770154 +0000 UTC m=+99.406409892"
Nov 25 09:05:04 crc kubenswrapper[4687]: I1125 09:05:04.734182 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:05:04 crc kubenswrapper[4687]: I1125 09:05:04.734180 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:05:04 crc kubenswrapper[4687]: I1125 09:05:04.734326 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb"
Nov 25 09:05:04 crc kubenswrapper[4687]: E1125 09:05:04.734452 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:05:04 crc kubenswrapper[4687]: I1125 09:05:04.734476 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:05:04 crc kubenswrapper[4687]: E1125 09:05:04.734610 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461"
Nov 25 09:05:04 crc kubenswrapper[4687]: E1125 09:05:04.734900 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:05:04 crc kubenswrapper[4687]: E1125 09:05:04.735103 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:05:06 crc kubenswrapper[4687]: I1125 09:05:06.733645 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb"
Nov 25 09:05:06 crc kubenswrapper[4687]: I1125 09:05:06.733725 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:05:06 crc kubenswrapper[4687]: I1125 09:05:06.733787 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:05:06 crc kubenswrapper[4687]: E1125 09:05:06.733788 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461"
Nov 25 09:05:06 crc kubenswrapper[4687]: E1125 09:05:06.733865 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:05:06 crc kubenswrapper[4687]: I1125 09:05:06.733926 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:05:06 crc kubenswrapper[4687]: E1125 09:05:06.734116 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:05:06 crc kubenswrapper[4687]: E1125 09:05:06.734295 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:05:08 crc kubenswrapper[4687]: I1125 09:05:08.377095 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs\") pod \"network-metrics-daemon-cscrb\" (UID: \"0433643a-5ed9-485b-a788-51de4a92f461\") " pod="openshift-multus/network-metrics-daemon-cscrb"
Nov 25 09:05:08 crc kubenswrapper[4687]: E1125 09:05:08.377230 4687 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 09:05:08 crc kubenswrapper[4687]: E1125 09:05:08.377273 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs podName:0433643a-5ed9-485b-a788-51de4a92f461 nodeName:}" failed. No retries permitted until 2025-11-25 09:06:12.377259516 +0000 UTC m=+167.430899234 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs") pod "network-metrics-daemon-cscrb" (UID: "0433643a-5ed9-485b-a788-51de4a92f461") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 09:05:08 crc kubenswrapper[4687]: I1125 09:05:08.734676 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:05:08 crc kubenswrapper[4687]: I1125 09:05:08.734741 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:05:08 crc kubenswrapper[4687]: I1125 09:05:08.734697 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb"
Nov 25 09:05:08 crc kubenswrapper[4687]: E1125 09:05:08.734803 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:05:08 crc kubenswrapper[4687]: I1125 09:05:08.734676 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:05:08 crc kubenswrapper[4687]: E1125 09:05:08.734941 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:05:08 crc kubenswrapper[4687]: E1125 09:05:08.734998 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461"
Nov 25 09:05:08 crc kubenswrapper[4687]: E1125 09:05:08.735083 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:05:10 crc kubenswrapper[4687]: I1125 09:05:10.734017 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:05:10 crc kubenswrapper[4687]: I1125 09:05:10.734064 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:05:10 crc kubenswrapper[4687]: I1125 09:05:10.734044 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb"
Nov 25 09:05:10 crc kubenswrapper[4687]: I1125 09:05:10.734032 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:05:10 crc kubenswrapper[4687]: E1125 09:05:10.734243 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:05:10 crc kubenswrapper[4687]: E1125 09:05:10.734314 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:05:10 crc kubenswrapper[4687]: E1125 09:05:10.734416 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:05:10 crc kubenswrapper[4687]: E1125 09:05:10.734670 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461"
Nov 25 09:05:12 crc kubenswrapper[4687]: I1125 09:05:12.734149 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:05:12 crc kubenswrapper[4687]: I1125 09:05:12.734238 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:05:12 crc kubenswrapper[4687]: I1125 09:05:12.734303 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:05:12 crc kubenswrapper[4687]: E1125 09:05:12.734387 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:05:12 crc kubenswrapper[4687]: I1125 09:05:12.734401 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb"
Nov 25 09:05:12 crc kubenswrapper[4687]: E1125 09:05:12.734591 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461"
Nov 25 09:05:12 crc kubenswrapper[4687]: E1125 09:05:12.734585 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:05:12 crc kubenswrapper[4687]: E1125 09:05:12.734743 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:05:13 crc kubenswrapper[4687]: I1125 09:05:13.735530 4687 scope.go:117] "RemoveContainer" containerID="b79d16f32d9cc4164ef4e877c4d4ce2c66ab956905e0969302bc5ed18ae02219"
Nov 25 09:05:13 crc kubenswrapper[4687]: E1125 09:05:13.735804 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-p68hx_openshift-ovn-kubernetes(d371271f-84c3-405c-b41f-604a06c1bb71)\"" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" podUID="d371271f-84c3-405c-b41f-604a06c1bb71"
Nov 25 09:05:14 crc kubenswrapper[4687]: I1125 09:05:14.734449 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb"
Nov 25 09:05:14 crc kubenswrapper[4687]: I1125 09:05:14.734521 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:05:14 crc kubenswrapper[4687]: I1125 09:05:14.734607 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:05:14 crc kubenswrapper[4687]: I1125 09:05:14.734625 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:05:14 crc kubenswrapper[4687]: E1125 09:05:14.734707 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461"
Nov 25 09:05:14 crc kubenswrapper[4687]: E1125 09:05:14.734785 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:05:14 crc kubenswrapper[4687]: E1125 09:05:14.734875 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:05:14 crc kubenswrapper[4687]: E1125 09:05:14.734987 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:05:16 crc kubenswrapper[4687]: I1125 09:05:16.734155 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:16 crc kubenswrapper[4687]: I1125 09:05:16.734161 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:05:16 crc kubenswrapper[4687]: I1125 09:05:16.734279 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:05:16 crc kubenswrapper[4687]: I1125 09:05:16.734314 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:05:16 crc kubenswrapper[4687]: E1125 09:05:16.734401 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:05:16 crc kubenswrapper[4687]: E1125 09:05:16.734605 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:05:16 crc kubenswrapper[4687]: E1125 09:05:16.734706 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:05:16 crc kubenswrapper[4687]: E1125 09:05:16.734781 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:05:18 crc kubenswrapper[4687]: I1125 09:05:18.733950 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:05:18 crc kubenswrapper[4687]: E1125 09:05:18.734384 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:05:18 crc kubenswrapper[4687]: I1125 09:05:18.733975 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:05:18 crc kubenswrapper[4687]: E1125 09:05:18.734466 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:05:18 crc kubenswrapper[4687]: I1125 09:05:18.733985 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:18 crc kubenswrapper[4687]: E1125 09:05:18.734550 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:05:18 crc kubenswrapper[4687]: I1125 09:05:18.733968 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:05:18 crc kubenswrapper[4687]: E1125 09:05:18.734617 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:05:20 crc kubenswrapper[4687]: I1125 09:05:20.734047 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:05:20 crc kubenswrapper[4687]: I1125 09:05:20.734088 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:05:20 crc kubenswrapper[4687]: I1125 09:05:20.734087 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:20 crc kubenswrapper[4687]: E1125 09:05:20.734170 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:05:20 crc kubenswrapper[4687]: I1125 09:05:20.734252 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:05:20 crc kubenswrapper[4687]: E1125 09:05:20.734346 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:05:20 crc kubenswrapper[4687]: E1125 09:05:20.734452 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:05:20 crc kubenswrapper[4687]: E1125 09:05:20.734571 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:05:22 crc kubenswrapper[4687]: I1125 09:05:22.734961 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:05:22 crc kubenswrapper[4687]: I1125 09:05:22.735038 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:05:22 crc kubenswrapper[4687]: E1125 09:05:22.735182 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:05:22 crc kubenswrapper[4687]: E1125 09:05:22.735329 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:05:22 crc kubenswrapper[4687]: I1125 09:05:22.735419 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:22 crc kubenswrapper[4687]: E1125 09:05:22.735680 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:05:22 crc kubenswrapper[4687]: I1125 09:05:22.736381 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:05:22 crc kubenswrapper[4687]: E1125 09:05:22.736992 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:05:24 crc kubenswrapper[4687]: I1125 09:05:24.733798 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:05:24 crc kubenswrapper[4687]: I1125 09:05:24.733859 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:05:24 crc kubenswrapper[4687]: I1125 09:05:24.733891 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:24 crc kubenswrapper[4687]: E1125 09:05:24.733971 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:05:24 crc kubenswrapper[4687]: I1125 09:05:24.734110 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:05:24 crc kubenswrapper[4687]: E1125 09:05:24.734318 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:05:24 crc kubenswrapper[4687]: E1125 09:05:24.734411 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:05:24 crc kubenswrapper[4687]: E1125 09:05:24.734619 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:05:25 crc kubenswrapper[4687]: E1125 09:05:25.738124 4687 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 25 09:05:25 crc kubenswrapper[4687]: E1125 09:05:25.856605 4687 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 09:05:26 crc kubenswrapper[4687]: I1125 09:05:26.413369 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wlzrb_0e7c96e4-c7fa-466f-b0b6-495612ed71f8/kube-multus/1.log" Nov 25 09:05:26 crc kubenswrapper[4687]: I1125 09:05:26.414391 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wlzrb_0e7c96e4-c7fa-466f-b0b6-495612ed71f8/kube-multus/0.log" Nov 25 09:05:26 crc kubenswrapper[4687]: I1125 09:05:26.414489 4687 generic.go:334] "Generic (PLEG): container finished" podID="0e7c96e4-c7fa-466f-b0b6-495612ed71f8" containerID="fd7297dc4adfd4a2e6ea288887f965d19bcf553824c5f7813dfa12052c60d189" exitCode=1 Nov 25 09:05:26 crc kubenswrapper[4687]: I1125 09:05:26.414603 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wlzrb" event={"ID":"0e7c96e4-c7fa-466f-b0b6-495612ed71f8","Type":"ContainerDied","Data":"fd7297dc4adfd4a2e6ea288887f965d19bcf553824c5f7813dfa12052c60d189"} Nov 25 09:05:26 crc kubenswrapper[4687]: I1125 09:05:26.414741 4687 scope.go:117] "RemoveContainer" containerID="42d122098a66a2bea73eb4b449172e1882e3c98879a046fc19cf12c4db6dcd45" Nov 25 09:05:26 crc kubenswrapper[4687]: I1125 09:05:26.415776 4687 scope.go:117] "RemoveContainer" containerID="fd7297dc4adfd4a2e6ea288887f965d19bcf553824c5f7813dfa12052c60d189" Nov 25 09:05:26 crc kubenswrapper[4687]: E1125 09:05:26.416252 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-wlzrb_openshift-multus(0e7c96e4-c7fa-466f-b0b6-495612ed71f8)\"" pod="openshift-multus/multus-wlzrb" podUID="0e7c96e4-c7fa-466f-b0b6-495612ed71f8" Nov 25 09:05:26 crc kubenswrapper[4687]: I1125 09:05:26.734457 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:05:26 crc kubenswrapper[4687]: I1125 09:05:26.734585 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:05:26 crc kubenswrapper[4687]: E1125 09:05:26.734714 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:05:26 crc kubenswrapper[4687]: I1125 09:05:26.734760 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:05:26 crc kubenswrapper[4687]: I1125 09:05:26.734837 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:26 crc kubenswrapper[4687]: E1125 09:05:26.735419 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:05:26 crc kubenswrapper[4687]: E1125 09:05:26.735544 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:05:26 crc kubenswrapper[4687]: E1125 09:05:26.735688 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:05:26 crc kubenswrapper[4687]: I1125 09:05:26.735959 4687 scope.go:117] "RemoveContainer" containerID="b79d16f32d9cc4164ef4e877c4d4ce2c66ab956905e0969302bc5ed18ae02219" Nov 25 09:05:26 crc kubenswrapper[4687]: E1125 09:05:26.736209 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-p68hx_openshift-ovn-kubernetes(d371271f-84c3-405c-b41f-604a06c1bb71)\"" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" Nov 25 09:05:27 crc kubenswrapper[4687]: I1125 09:05:27.421925 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wlzrb_0e7c96e4-c7fa-466f-b0b6-495612ed71f8/kube-multus/1.log" Nov 25 09:05:28 crc kubenswrapper[4687]: I1125 09:05:28.733929 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:05:28 crc kubenswrapper[4687]: I1125 09:05:28.734009 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:28 crc kubenswrapper[4687]: I1125 09:05:28.734009 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:05:28 crc kubenswrapper[4687]: I1125 09:05:28.734084 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:05:28 crc kubenswrapper[4687]: E1125 09:05:28.734090 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:05:28 crc kubenswrapper[4687]: E1125 09:05:28.734186 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:05:28 crc kubenswrapper[4687]: E1125 09:05:28.734258 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:05:28 crc kubenswrapper[4687]: E1125 09:05:28.734279 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:05:30 crc kubenswrapper[4687]: I1125 09:05:30.734323 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:05:30 crc kubenswrapper[4687]: I1125 09:05:30.734347 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:05:30 crc kubenswrapper[4687]: E1125 09:05:30.734431 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:05:30 crc kubenswrapper[4687]: I1125 09:05:30.734324 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:05:30 crc kubenswrapper[4687]: E1125 09:05:30.734703 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:05:30 crc kubenswrapper[4687]: I1125 09:05:30.734738 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:30 crc kubenswrapper[4687]: E1125 09:05:30.734827 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:05:30 crc kubenswrapper[4687]: E1125 09:05:30.735024 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:05:30 crc kubenswrapper[4687]: E1125 09:05:30.858080 4687 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 09:05:32 crc kubenswrapper[4687]: I1125 09:05:32.734465 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:05:32 crc kubenswrapper[4687]: I1125 09:05:32.734580 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:32 crc kubenswrapper[4687]: I1125 09:05:32.734649 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:05:32 crc kubenswrapper[4687]: E1125 09:05:32.734998 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:05:32 crc kubenswrapper[4687]: E1125 09:05:32.735397 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:05:32 crc kubenswrapper[4687]: E1125 09:05:32.735479 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:05:32 crc kubenswrapper[4687]: I1125 09:05:32.736586 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:05:32 crc kubenswrapper[4687]: E1125 09:05:32.736717 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:05:34 crc kubenswrapper[4687]: I1125 09:05:34.734490 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:34 crc kubenswrapper[4687]: E1125 09:05:34.734670 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:05:34 crc kubenswrapper[4687]: I1125 09:05:34.734721 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:05:34 crc kubenswrapper[4687]: I1125 09:05:34.734803 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:05:34 crc kubenswrapper[4687]: E1125 09:05:34.734862 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:05:34 crc kubenswrapper[4687]: E1125 09:05:34.734952 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:05:34 crc kubenswrapper[4687]: I1125 09:05:34.735045 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:05:34 crc kubenswrapper[4687]: E1125 09:05:34.735119 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:05:35 crc kubenswrapper[4687]: E1125 09:05:35.859224 4687 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 09:05:36 crc kubenswrapper[4687]: I1125 09:05:36.733765 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:05:36 crc kubenswrapper[4687]: I1125 09:05:36.733832 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:05:36 crc kubenswrapper[4687]: E1125 09:05:36.734613 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:05:36 crc kubenswrapper[4687]: I1125 09:05:36.733876 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:05:36 crc kubenswrapper[4687]: E1125 09:05:36.735628 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:05:36 crc kubenswrapper[4687]: I1125 09:05:36.733850 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:36 crc kubenswrapper[4687]: E1125 09:05:36.735836 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:05:36 crc kubenswrapper[4687]: E1125 09:05:36.734183 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:05:38 crc kubenswrapper[4687]: I1125 09:05:38.733717 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:05:38 crc kubenswrapper[4687]: I1125 09:05:38.733803 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:38 crc kubenswrapper[4687]: I1125 09:05:38.733823 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:05:38 crc kubenswrapper[4687]: I1125 09:05:38.733934 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:05:38 crc kubenswrapper[4687]: E1125 09:05:38.734375 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:05:38 crc kubenswrapper[4687]: E1125 09:05:38.734593 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:05:38 crc kubenswrapper[4687]: E1125 09:05:38.734687 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:05:38 crc kubenswrapper[4687]: E1125 09:05:38.734754 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:05:40 crc kubenswrapper[4687]: I1125 09:05:40.734798 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:05:40 crc kubenswrapper[4687]: I1125 09:05:40.734822 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:05:40 crc kubenswrapper[4687]: I1125 09:05:40.735140 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:40 crc kubenswrapper[4687]: E1125 09:05:40.735283 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:05:40 crc kubenswrapper[4687]: I1125 09:05:40.735297 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:05:40 crc kubenswrapper[4687]: E1125 09:05:40.735562 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:05:40 crc kubenswrapper[4687]: E1125 09:05:40.735639 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:05:40 crc kubenswrapper[4687]: I1125 09:05:40.735675 4687 scope.go:117] "RemoveContainer" containerID="b79d16f32d9cc4164ef4e877c4d4ce2c66ab956905e0969302bc5ed18ae02219" Nov 25 09:05:40 crc kubenswrapper[4687]: I1125 09:05:40.736276 4687 scope.go:117] "RemoveContainer" containerID="fd7297dc4adfd4a2e6ea288887f965d19bcf553824c5f7813dfa12052c60d189" Nov 25 09:05:40 crc kubenswrapper[4687]: E1125 09:05:40.737274 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:05:40 crc kubenswrapper[4687]: E1125 09:05:40.860495 4687 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Nov 25 09:05:41 crc kubenswrapper[4687]: I1125 09:05:41.488559 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-p68hx_d371271f-84c3-405c-b41f-604a06c1bb71/ovnkube-controller/3.log" Nov 25 09:05:41 crc kubenswrapper[4687]: I1125 09:05:41.491560 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerStarted","Data":"58d2e36fa26a4e84c8f9778e4cd44e111aa6feda866a77e47f9ba7ff7501bcd0"} Nov 25 09:05:41 crc kubenswrapper[4687]: I1125 09:05:41.491909 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:05:41 crc kubenswrapper[4687]: I1125 09:05:41.493932 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wlzrb_0e7c96e4-c7fa-466f-b0b6-495612ed71f8/kube-multus/1.log" Nov 25 09:05:41 crc kubenswrapper[4687]: I1125 09:05:41.493990 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wlzrb" event={"ID":"0e7c96e4-c7fa-466f-b0b6-495612ed71f8","Type":"ContainerStarted","Data":"c3581ad6cb85979bdd2e1342025b4b624bb9bebc32733ee23eb68b287b096ec6"} Nov 25 09:05:41 crc kubenswrapper[4687]: I1125 09:05:41.529050 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" podStartSLOduration=112.529036654 podStartE2EDuration="1m52.529036654s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:41.52775406 +0000 UTC m=+136.581393778" watchObservedRunningTime="2025-11-25 09:05:41.529036654 +0000 UTC m=+136.582676372" Nov 25 09:05:41 crc kubenswrapper[4687]: I1125 09:05:41.580361 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-cscrb"] Nov 25 09:05:41 crc kubenswrapper[4687]: I1125 09:05:41.580489 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:05:41 crc kubenswrapper[4687]: E1125 09:05:41.580598 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:05:42 crc kubenswrapper[4687]: I1125 09:05:42.734574 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:42 crc kubenswrapper[4687]: E1125 09:05:42.734936 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:05:42 crc kubenswrapper[4687]: I1125 09:05:42.734672 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:05:42 crc kubenswrapper[4687]: I1125 09:05:42.734611 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:05:42 crc kubenswrapper[4687]: E1125 09:05:42.735007 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:05:42 crc kubenswrapper[4687]: I1125 09:05:42.734688 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:05:42 crc kubenswrapper[4687]: E1125 09:05:42.735047 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:05:42 crc kubenswrapper[4687]: E1125 09:05:42.735196 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:05:44 crc kubenswrapper[4687]: I1125 09:05:44.734236 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:44 crc kubenswrapper[4687]: I1125 09:05:44.734284 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:05:44 crc kubenswrapper[4687]: I1125 09:05:44.734280 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:05:44 crc kubenswrapper[4687]: I1125 09:05:44.734369 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:05:44 crc kubenswrapper[4687]: E1125 09:05:44.734475 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:05:44 crc kubenswrapper[4687]: E1125 09:05:44.734730 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cscrb" podUID="0433643a-5ed9-485b-a788-51de4a92f461" Nov 25 09:05:44 crc kubenswrapper[4687]: E1125 09:05:44.734855 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:05:44 crc kubenswrapper[4687]: E1125 09:05:44.735009 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:05:46 crc kubenswrapper[4687]: I1125 09:05:46.734128 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:05:46 crc kubenswrapper[4687]: I1125 09:05:46.734247 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:05:46 crc kubenswrapper[4687]: I1125 09:05:46.734254 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:46 crc kubenswrapper[4687]: I1125 09:05:46.734353 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:05:46 crc kubenswrapper[4687]: I1125 09:05:46.738985 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 25 09:05:46 crc kubenswrapper[4687]: I1125 09:05:46.739156 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 25 09:05:46 crc kubenswrapper[4687]: I1125 09:05:46.739458 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 09:05:46 crc kubenswrapper[4687]: I1125 09:05:46.739666 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 25 09:05:46 crc kubenswrapper[4687]: I1125 09:05:46.744723 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 25 09:05:46 crc kubenswrapper[4687]: I1125 09:05:46.744917 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 25 09:05:52 crc kubenswrapper[4687]: I1125 09:05:52.645423 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:05:52 crc kubenswrapper[4687]: I1125 09:05:52.645628 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:52 crc kubenswrapper[4687]: E1125 09:05:52.645688 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:07:54.645648181 +0000 UTC m=+269.699287929 (durationBeforeRetry 2m2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:05:52 crc kubenswrapper[4687]: I1125 09:05:52.646903 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:52 crc kubenswrapper[4687]: I1125 09:05:52.746814 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:52 crc kubenswrapper[4687]: I1125 09:05:52.746885 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:05:52 crc kubenswrapper[4687]: I1125 09:05:52.746923 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:05:52 crc kubenswrapper[4687]: I1125 09:05:52.753286 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:52 crc kubenswrapper[4687]: I1125 09:05:52.753817 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:05:52 crc kubenswrapper[4687]: I1125 09:05:52.754134 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:05:52 crc kubenswrapper[4687]: I1125 09:05:52.768567 4687 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:05:52 crc kubenswrapper[4687]: I1125 09:05:52.794626 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:05:53 crc kubenswrapper[4687]: W1125 09:05:53.031641 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-ba0291512589b1130cba761a3a64b53c8f08a852e0fc47637a37df190ec1ee5f WatchSource:0}: Error finding container ba0291512589b1130cba761a3a64b53c8f08a852e0fc47637a37df190ec1ee5f: Status 404 returned error can't find the container with id ba0291512589b1130cba761a3a64b53c8f08a852e0fc47637a37df190ec1ee5f Nov 25 09:05:53 crc kubenswrapper[4687]: I1125 09:05:53.050354 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:05:53 crc kubenswrapper[4687]: I1125 09:05:53.543819 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"f0b4038198f70d8231a8b1d1fa6d3f0ca87d53f8fdedb7edefbd8bbee328f4c9"} Nov 25 09:05:53 crc kubenswrapper[4687]: I1125 09:05:53.543950 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"ba0291512589b1130cba761a3a64b53c8f08a852e0fc47637a37df190ec1ee5f"} Nov 25 09:05:53 crc kubenswrapper[4687]: I1125 09:05:53.544166 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:05:53 crc kubenswrapper[4687]: I1125 09:05:53.546627 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"1402b516cc3774f8173319749137892ca1de02fe85d09607d66dde1761fb259b"} Nov 25 09:05:53 crc kubenswrapper[4687]: I1125 09:05:53.546713 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"1e411eea937edb0e45e20866363b30d03a9fa8afb42d316932fdd3aa76632ade"} Nov 25 09:05:53 crc kubenswrapper[4687]: I1125 09:05:53.555107 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"caeff70c01ee61f2b6d39b62c77e5caf72e48a5d1230401ea0542d3da7836760"} Nov 25 09:05:53 crc kubenswrapper[4687]: I1125 09:05:53.555412 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"b60320c18d417bae7ebb09d58a6e7f735f2ba6d7f97da4f83c051ca606021540"} Nov 25 09:05:53 crc kubenswrapper[4687]: I1125 09:05:53.844735 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: 
Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:05:53 crc kubenswrapper[4687]: I1125 09:05:53.844879 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.300360 4687 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.343374 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.343792 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.346239 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-hwpb2"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.349762 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-xnvrp"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.350123 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2qjbv"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.350180 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.350281 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.350448 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.351071 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xnvrp" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.352058 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.352384 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-v9zjk"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.353224 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-v9zjk" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.353731 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.357004 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6qjt6"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.357783 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.358494 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-c27v5"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.358967 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-zgglt"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.359601 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.359808 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-c7v75"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.360229 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6qjt6" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.360338 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.360766 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.360801 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-c27v5" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.361944 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.366007 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.366648 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.366811 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.366963 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.367487 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.367681 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.368744 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.382964 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.383055 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.382955 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.383905 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.384247 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.384641 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.384961 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.385035 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.385481 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-6gb62"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.387989 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-6gb62" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.389028 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.389311 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.392720 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.392952 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.393979 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bskt8"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.394190 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.400014 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bskt8" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.401564 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-fq9w8"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.403326 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.418761 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fq9w8" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.420335 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-xmxvb"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.420814 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-xmxvb" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.421329 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.421533 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.421687 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.422050 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.422259 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.422420 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.422653 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.422801 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.422896 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.422802 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.423094 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.422839 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.422861 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.423378 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.423572 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.423675 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.426788 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.426864 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.426940 4687 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.427078 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.427169 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.427202 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.427235 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.427343 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.427416 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.427613 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.427825 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.427996 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.428114 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.428198 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.428279 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.428391 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.428464 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.428524 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.428563 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.428654 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.428738 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.428819 4687 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-authentication"/"kube-root-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.428896 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.427617 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.429196 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.429324 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.429677 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.430882 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.431903 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.437916 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-76gwh"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.438384 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-68pmb"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.438536 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-76gwh" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.438706 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.440607 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-5jnmz"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.441189 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2d9pq"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.441647 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.441815 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-5jnmz" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.452692 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.453015 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zdldw"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.452754 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.453514 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zdldw" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.454784 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.455789 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.456086 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jqcw"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.469381 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-j4vxg"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.470445 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-j4vxg" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.470853 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jqcw" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.471541 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.471698 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-mclwx"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.472261 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.472435 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.472679 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.472808 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.472861 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.473054 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.473155 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgkfv"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.473102 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mclwx" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.475358 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.475477 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.476000 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.476980 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.477081 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a45ab31-45db-4069-8da2-4c53cd2689ca-config\") pod \"route-controller-manager-6576b87f9c-hqtxf\" (UID: \"0a45ab31-45db-4069-8da2-4c53cd2689ca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.477362 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.477451 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/6202d20a-5377-4876-a2cb-700e4b0ccf60-image-import-ca\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.477527 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.477545 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f8678ea-08b9-4dcd-b70d-e19800c697e3-config\") pod \"console-operator-58897d9998-6gb62\" (UID: \"9f8678ea-08b9-4dcd-b70d-e19800c697e3\") " pod="openshift-console-operator/console-operator-58897d9998-6gb62" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.477586 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.477625 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/69faad41-a827-4fd3-b43e-036297dc2c9f-audit-dir\") pod 
\"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.477654 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-console-config\") pod \"console-f9d7485db-zgglt\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.477668 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.477684 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/efdd22a2-19ab-40f6-8a0d-fdbfdc3dbff3-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-6qjt6\" (UID: \"efdd22a2-19ab-40f6-8a0d-fdbfdc3dbff3\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6qjt6" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.477748 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.477799 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.477875 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6202d20a-5377-4876-a2cb-700e4b0ccf60-etcd-client\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.477908 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mltqm\" (UniqueName: \"kubernetes.io/projected/5cffffee-7f08-4c94-bf30-15419e5e91e2-kube-api-access-mltqm\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.477935 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6202d20a-5377-4876-a2cb-700e4b0ccf60-encryption-config\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.477962 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25aecd46-36d8-4ee9-bae5-4731e91b5e74-config\") pod \"machine-api-operator-5694c8668f-v9zjk\" (UID: \"25aecd46-36d8-4ee9-bae5-4731e91b5e74\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v9zjk" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.477998 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.478175 4687 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25fbr\" (UniqueName: \"kubernetes.io/projected/25aecd46-36d8-4ee9-bae5-4731e91b5e74-kube-api-access-25fbr\") pod \"machine-api-operator-5694c8668f-v9zjk\" (UID: \"25aecd46-36d8-4ee9-bae5-4731e91b5e74\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v9zjk" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.478254 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e8238e53-faf8-4dc1-a726-76368f0319be-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-2qjbv\" (UID: \"e8238e53-faf8-4dc1-a726-76368f0319be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.478274 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5cffffee-7f08-4c94-bf30-15419e5e91e2-encryption-config\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.478294 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e0750137-60d4-4ea8-be2b-097a562d4b2f-auth-proxy-config\") pod \"machine-approver-56656f9798-xnvrp\" (UID: \"e0750137-60d4-4ea8-be2b-097a562d4b2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xnvrp" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.478253 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgkfv" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.478628 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.478312 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.478316 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0a45ab31-45db-4069-8da2-4c53cd2689ca-client-ca\") pod \"route-controller-manager-6576b87f9c-hqtxf\" (UID: \"0a45ab31-45db-4069-8da2-4c53cd2689ca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.478527 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.478592 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.478636 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.478686 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6202d20a-5377-4876-a2cb-700e4b0ccf60-serving-cert\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.479234 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.480738 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.481451 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.481718 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.493871 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6202d20a-5377-4876-a2cb-700e4b0ccf60-audit-dir\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.494156 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.494269 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5cffffee-7f08-4c94-bf30-15419e5e91e2-audit-dir\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.494353 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-service-ca\") pod \"console-f9d7485db-zgglt\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.494467 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e8238e53-faf8-4dc1-a726-76368f0319be-serving-cert\") pod \"controller-manager-879f6c89f-2qjbv\" (UID: \"e8238e53-faf8-4dc1-a726-76368f0319be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.494616 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5cffffee-7f08-4c94-bf30-15419e5e91e2-serving-cert\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.494721 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e8238e53-faf8-4dc1-a726-76368f0319be-config\") pod \"controller-manager-879f6c89f-2qjbv\" (UID: \"e8238e53-faf8-4dc1-a726-76368f0319be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.494820 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/35177658-25cb-4a51-a14e-5fb925283ac8-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-bskt8\" (UID: \"35177658-25cb-4a51-a14e-5fb925283ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bskt8" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.494939 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k859w\" (UniqueName: \"kubernetes.io/projected/35177658-25cb-4a51-a14e-5fb925283ac8-kube-api-access-k859w\") pod \"openshift-apiserver-operator-796bbdcf4f-bskt8\" (UID: \"35177658-25cb-4a51-a14e-5fb925283ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bskt8" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.495054 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/25aecd46-36d8-4ee9-bae5-4731e91b5e74-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-v9zjk\" (UID: \"25aecd46-36d8-4ee9-bae5-4731e91b5e74\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v9zjk" Nov 25 09:05:54 crc 
kubenswrapper[4687]: I1125 09:05:54.495146 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.495242 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jc7gm\" (UniqueName: \"kubernetes.io/projected/e8238e53-faf8-4dc1-a726-76368f0319be-kube-api-access-jc7gm\") pod \"controller-manager-879f6c89f-2qjbv\" (UID: \"e8238e53-faf8-4dc1-a726-76368f0319be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.495346 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5cffffee-7f08-4c94-bf30-15419e5e91e2-etcd-client\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.495450 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wjlfh\" (UniqueName: \"kubernetes.io/projected/31a14c62-0956-4d44-98f4-724da42f4e78-kube-api-access-wjlfh\") pod \"openshift-config-operator-7777fb866f-fq9w8\" (UID: \"31a14c62-0956-4d44-98f4-724da42f4e78\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fq9w8" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.495586 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lc5r6\" (UniqueName: \"kubernetes.io/projected/057a2f29-f877-40e5-9a25-d1a2d26918ad-kube-api-access-lc5r6\") pod \"downloads-7954f5f757-xmxvb\" (UID: \"057a2f29-f877-40e5-9a25-d1a2d26918ad\") " pod="openshift-console/downloads-7954f5f757-xmxvb" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.495682 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0750137-60d4-4ea8-be2b-097a562d4b2f-config\") pod \"machine-approver-56656f9798-xnvrp\" (UID: \"e0750137-60d4-4ea8-be2b-097a562d4b2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xnvrp" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.495799 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.495930 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smzws\" (UniqueName: \"kubernetes.io/projected/69faad41-a827-4fd3-b43e-036297dc2c9f-kube-api-access-smzws\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc 
kubenswrapper[4687]: I1125 09:05:54.496044 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-audit-policies\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.495705 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-ntss9"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.495008 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.496612 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9f8678ea-08b9-4dcd-b70d-e19800c697e3-trusted-ca\") pod \"console-operator-58897d9998-6gb62\" (UID: \"9f8678ea-08b9-4dcd-b70d-e19800c697e3\") " pod="openshift-console-operator/console-operator-58897d9998-6gb62" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.496657 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e8238e53-faf8-4dc1-a726-76368f0319be-client-ca\") pod \"controller-manager-879f6c89f-2qjbv\" (UID: \"e8238e53-faf8-4dc1-a726-76368f0319be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.496678 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5cffffee-7f08-4c94-bf30-15419e5e91e2-audit-policies\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.496326 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.496735 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fbe26cf0-9829-4e16-b4c6-24484b1e678a-console-oauth-config\") pod \"console-f9d7485db-zgglt\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.496754 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/e0750137-60d4-4ea8-be2b-097a562d4b2f-machine-approver-tls\") pod \"machine-approver-56656f9798-xnvrp\" (UID: \"e0750137-60d4-4ea8-be2b-097a562d4b2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xnvrp" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.496782 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9f8678ea-08b9-4dcd-b70d-e19800c697e3-serving-cert\") pod \"console-operator-58897d9998-6gb62\" (UID: \"9f8678ea-08b9-4dcd-b70d-e19800c697e3\") " pod="openshift-console-operator/console-operator-58897d9998-6gb62" Nov 25 09:05:54 crc 
kubenswrapper[4687]: I1125 09:05:54.496524 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.496817 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/31a14c62-0956-4d44-98f4-724da42f4e78-available-featuregates\") pod \"openshift-config-operator-7777fb866f-fq9w8\" (UID: \"31a14c62-0956-4d44-98f4-724da42f4e78\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fq9w8" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.496851 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6202d20a-5377-4876-a2cb-700e4b0ccf60-config\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.496865 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwzbp\" (UniqueName: \"kubernetes.io/projected/6202d20a-5377-4876-a2cb-700e4b0ccf60-kube-api-access-wwzbp\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.496918 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5cffffee-7f08-4c94-bf30-15419e5e91e2-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.496951 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497039 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5cffffee-7f08-4c94-bf30-15419e5e91e2-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497072 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f9bd19fb-a226-460a-8164-5538673a3783-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-c27v5\" (UID: \"f9bd19fb-a226-460a-8164-5538673a3783\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-c27v5" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497093 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/31a14c62-0956-4d44-98f4-724da42f4e78-serving-cert\") pod 
\"openshift-config-operator-7777fb866f-fq9w8\" (UID: \"31a14c62-0956-4d44-98f4-724da42f4e78\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fq9w8" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497120 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbx4j\" (UniqueName: \"kubernetes.io/projected/efdd22a2-19ab-40f6-8a0d-fdbfdc3dbff3-kube-api-access-bbx4j\") pod \"cluster-samples-operator-665b6dd947-6qjt6\" (UID: \"efdd22a2-19ab-40f6-8a0d-fdbfdc3dbff3\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6qjt6" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497139 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/f9bd19fb-a226-460a-8164-5538673a3783-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-c27v5\" (UID: \"f9bd19fb-a226-460a-8164-5538673a3783\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-c27v5" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497179 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f9bd19fb-a226-460a-8164-5538673a3783-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-c27v5\" (UID: \"f9bd19fb-a226-460a-8164-5538673a3783\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-c27v5" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497198 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skrpq\" (UniqueName: \"kubernetes.io/projected/f9bd19fb-a226-460a-8164-5538673a3783-kube-api-access-skrpq\") pod \"cluster-image-registry-operator-dc59b4c8b-c27v5\" (UID: \"f9bd19fb-a226-460a-8164-5538673a3783\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-c27v5" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497221 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497247 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497261 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwpbx\" (UniqueName: \"kubernetes.io/projected/fbe26cf0-9829-4e16-b4c6-24484b1e678a-kube-api-access-cwpbx\") pod \"console-f9d7485db-zgglt\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497280 4687 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sprbc\" (UniqueName: \"kubernetes.io/projected/0a45ab31-45db-4069-8da2-4c53cd2689ca-kube-api-access-sprbc\") pod \"route-controller-manager-6576b87f9c-hqtxf\" (UID: \"0a45ab31-45db-4069-8da2-4c53cd2689ca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497298 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/6202d20a-5377-4876-a2cb-700e4b0ccf60-node-pullsecrets\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497311 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/6202d20a-5377-4876-a2cb-700e4b0ccf60-audit\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497327 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497374 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-trusted-ca-bundle\") pod \"console-f9d7485db-zgglt\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497389 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-oauth-serving-cert\") pod \"console-f9d7485db-zgglt\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497409 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497425 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/25aecd46-36d8-4ee9-bae5-4731e91b5e74-images\") pod \"machine-api-operator-5694c8668f-v9zjk\" (UID: \"25aecd46-36d8-4ee9-bae5-4731e91b5e74\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v9zjk" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497442 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/0a45ab31-45db-4069-8da2-4c53cd2689ca-serving-cert\") pod \"route-controller-manager-6576b87f9c-hqtxf\" (UID: \"0a45ab31-45db-4069-8da2-4c53cd2689ca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497474 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9kvgj\" (UniqueName: \"kubernetes.io/projected/e0750137-60d4-4ea8-be2b-097a562d4b2f-kube-api-access-9kvgj\") pod \"machine-approver-56656f9798-xnvrp\" (UID: \"e0750137-60d4-4ea8-be2b-097a562d4b2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xnvrp" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497491 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6202d20a-5377-4876-a2cb-700e4b0ccf60-trusted-ca-bundle\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497522 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvbrm\" (UniqueName: \"kubernetes.io/projected/9f8678ea-08b9-4dcd-b70d-e19800c697e3-kube-api-access-vvbrm\") pod \"console-operator-58897d9998-6gb62\" (UID: \"9f8678ea-08b9-4dcd-b70d-e19800c697e3\") " pod="openshift-console-operator/console-operator-58897d9998-6gb62" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497538 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fbe26cf0-9829-4e16-b4c6-24484b1e678a-console-serving-cert\") pod \"console-f9d7485db-zgglt\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.502403 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-9r7dh"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.502971 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-gm5pk"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.503409 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-kvhfg"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.504278 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-kvhfg" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.504733 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntss9" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.504366 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgs76"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.505129 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-9r7dh" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.505426 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgs76" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.508384 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-p2nkd"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.508849 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-p2nkd" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.509251 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zcrgz"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.509721 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zcrgz" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.510162 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.510948 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.511063 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ztsnq"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.511371 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.513225 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.513440 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.514964 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.521693 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-v6xqm"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.522278 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vftf5"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.522596 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5tvvm"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.522911 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vtn5b"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.523003 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5tvvm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.523031 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ztsnq" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.523262 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-v6xqm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.523348 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-vftf5" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.523932 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2qjbv"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.523989 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vtn5b" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.497551 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6202d20a-5377-4876-a2cb-700e4b0ccf60-etcd-serving-ca\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.524647 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.524668 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35177658-25cb-4a51-a14e-5fb925283ac8-config\") pod \"openshift-apiserver-operator-796bbdcf4f-bskt8\" (UID: \"35177658-25cb-4a51-a14e-5fb925283ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bskt8" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.525862 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.526572 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.526968 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.527061 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-8x7wj"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.527208 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.527636 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-8x7wj" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.529757 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.534725 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-hwpb2"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.535995 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-cpbmq"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.536745 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-cpbmq" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.537091 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-zgglt"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.538570 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-c27v5"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.539871 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bskt8"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.540632 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-6gb62"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.542880 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-tw4q9"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.544876 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-tw4q9" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.548487 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.548693 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6qjt6"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.552569 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-fq9w8"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.555049 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-5jnmz"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.561713 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2d9pq"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.564511 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-ntss9"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.565925 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.578845 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ztsnq"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.581368 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zdldw"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.586909 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.587553 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-v9zjk"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.591234 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-kvhfg"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.594290 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5tvvm"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.596759 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.599778 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jqcw"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.602544 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-j4vxg"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.604241 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-gm5pk"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.626753 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 25 09:05:54 crc 
kubenswrapper[4687]: I1125 09:05:54.627046 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/69faad41-a827-4fd3-b43e-036297dc2c9f-audit-dir\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627086 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/b307de3a-d363-4077-933e-68f51ae40158-signing-key\") pod \"service-ca-9c57cc56f-vftf5\" (UID: \"b307de3a-d363-4077-933e-68f51ae40158\") " pod="openshift-service-ca/service-ca-9c57cc56f-vftf5" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627105 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5w85g\" (UniqueName: \"kubernetes.io/projected/b307de3a-d363-4077-933e-68f51ae40158-kube-api-access-5w85g\") pod \"service-ca-9c57cc56f-vftf5\" (UID: \"b307de3a-d363-4077-933e-68f51ae40158\") " pod="openshift-service-ca/service-ca-9c57cc56f-vftf5" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627126 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-console-config\") pod \"console-f9d7485db-zgglt\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627145 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/efdd22a2-19ab-40f6-8a0d-fdbfdc3dbff3-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-6qjt6\" (UID: \"efdd22a2-19ab-40f6-8a0d-fdbfdc3dbff3\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6qjt6" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627160 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6202d20a-5377-4876-a2cb-700e4b0ccf60-etcd-client\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627178 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mltqm\" (UniqueName: \"kubernetes.io/projected/5cffffee-7f08-4c94-bf30-15419e5e91e2-kube-api-access-mltqm\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627193 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6202d20a-5377-4876-a2cb-700e4b0ccf60-encryption-config\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627207 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25aecd46-36d8-4ee9-bae5-4731e91b5e74-config\") pod \"machine-api-operator-5694c8668f-v9zjk\" (UID: 
\"25aecd46-36d8-4ee9-bae5-4731e91b5e74\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v9zjk" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627221 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25fbr\" (UniqueName: \"kubernetes.io/projected/25aecd46-36d8-4ee9-bae5-4731e91b5e74-kube-api-access-25fbr\") pod \"machine-api-operator-5694c8668f-v9zjk\" (UID: \"25aecd46-36d8-4ee9-bae5-4731e91b5e74\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v9zjk" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627236 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/e9c2451a-27de-43d2-a9e8-90f33ab30ce1-etcd-service-ca\") pod \"etcd-operator-b45778765-68pmb\" (UID: \"e9c2451a-27de-43d2-a9e8-90f33ab30ce1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627267 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e8238e53-faf8-4dc1-a726-76368f0319be-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-2qjbv\" (UID: \"e8238e53-faf8-4dc1-a726-76368f0319be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627283 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5cffffee-7f08-4c94-bf30-15419e5e91e2-encryption-config\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627297 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e0750137-60d4-4ea8-be2b-097a562d4b2f-auth-proxy-config\") pod \"machine-approver-56656f9798-xnvrp\" (UID: \"e0750137-60d4-4ea8-be2b-097a562d4b2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xnvrp" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627314 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0a45ab31-45db-4069-8da2-4c53cd2689ca-client-ca\") pod \"route-controller-manager-6576b87f9c-hqtxf\" (UID: \"0a45ab31-45db-4069-8da2-4c53cd2689ca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627328 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6202d20a-5377-4876-a2cb-700e4b0ccf60-serving-cert\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627342 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6202d20a-5377-4876-a2cb-700e4b0ccf60-audit-dir\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627360 4687 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627374 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5cffffee-7f08-4c94-bf30-15419e5e91e2-audit-dir\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627389 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-service-ca\") pod \"console-f9d7485db-zgglt\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627403 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e8238e53-faf8-4dc1-a726-76368f0319be-serving-cert\") pod \"controller-manager-879f6c89f-2qjbv\" (UID: \"e8238e53-faf8-4dc1-a726-76368f0319be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627418 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5cffffee-7f08-4c94-bf30-15419e5e91e2-serving-cert\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627438 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e8238e53-faf8-4dc1-a726-76368f0319be-config\") pod \"controller-manager-879f6c89f-2qjbv\" (UID: \"e8238e53-faf8-4dc1-a726-76368f0319be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627453 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/35177658-25cb-4a51-a14e-5fb925283ac8-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-bskt8\" (UID: \"35177658-25cb-4a51-a14e-5fb925283ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bskt8" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627470 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k859w\" (UniqueName: \"kubernetes.io/projected/35177658-25cb-4a51-a14e-5fb925283ac8-kube-api-access-k859w\") pod \"openshift-apiserver-operator-796bbdcf4f-bskt8\" (UID: \"35177658-25cb-4a51-a14e-5fb925283ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bskt8" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627487 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/25aecd46-36d8-4ee9-bae5-4731e91b5e74-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-v9zjk\" (UID: 
\"25aecd46-36d8-4ee9-bae5-4731e91b5e74\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v9zjk" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627514 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627531 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jc7gm\" (UniqueName: \"kubernetes.io/projected/e8238e53-faf8-4dc1-a726-76368f0319be-kube-api-access-jc7gm\") pod \"controller-manager-879f6c89f-2qjbv\" (UID: \"e8238e53-faf8-4dc1-a726-76368f0319be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627548 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5cffffee-7f08-4c94-bf30-15419e5e91e2-etcd-client\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627565 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wjlfh\" (UniqueName: \"kubernetes.io/projected/31a14c62-0956-4d44-98f4-724da42f4e78-kube-api-access-wjlfh\") pod \"openshift-config-operator-7777fb866f-fq9w8\" (UID: \"31a14c62-0956-4d44-98f4-724da42f4e78\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fq9w8" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627580 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lc5r6\" (UniqueName: \"kubernetes.io/projected/057a2f29-f877-40e5-9a25-d1a2d26918ad-kube-api-access-lc5r6\") pod \"downloads-7954f5f757-xmxvb\" (UID: \"057a2f29-f877-40e5-9a25-d1a2d26918ad\") " pod="openshift-console/downloads-7954f5f757-xmxvb" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627597 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0750137-60d4-4ea8-be2b-097a562d4b2f-config\") pod \"machine-approver-56656f9798-xnvrp\" (UID: \"e0750137-60d4-4ea8-be2b-097a562d4b2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xnvrp" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627614 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627643 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smzws\" (UniqueName: \"kubernetes.io/projected/69faad41-a827-4fd3-b43e-036297dc2c9f-kube-api-access-smzws\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 
09:05:54.627658 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-audit-policies\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627674 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9f8678ea-08b9-4dcd-b70d-e19800c697e3-trusted-ca\") pod \"console-operator-58897d9998-6gb62\" (UID: \"9f8678ea-08b9-4dcd-b70d-e19800c697e3\") " pod="openshift-console-operator/console-operator-58897d9998-6gb62" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627692 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f8521f89-513c-41da-897e-9ea6fc278c21-auth-proxy-config\") pod \"machine-config-operator-74547568cd-9r7dh\" (UID: \"f8521f89-513c-41da-897e-9ea6fc278c21\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-9r7dh" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627709 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e8238e53-faf8-4dc1-a726-76368f0319be-client-ca\") pod \"controller-manager-879f6c89f-2qjbv\" (UID: \"e8238e53-faf8-4dc1-a726-76368f0319be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627724 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5cffffee-7f08-4c94-bf30-15419e5e91e2-audit-policies\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627739 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/e9c2451a-27de-43d2-a9e8-90f33ab30ce1-etcd-ca\") pod \"etcd-operator-b45778765-68pmb\" (UID: \"e9c2451a-27de-43d2-a9e8-90f33ab30ce1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627755 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtlpk\" (UniqueName: \"kubernetes.io/projected/e9c2451a-27de-43d2-a9e8-90f33ab30ce1-kube-api-access-qtlpk\") pod \"etcd-operator-b45778765-68pmb\" (UID: \"e9c2451a-27de-43d2-a9e8-90f33ab30ce1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627772 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fbe26cf0-9829-4e16-b4c6-24484b1e678a-console-oauth-config\") pod \"console-f9d7485db-zgglt\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627788 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/e0750137-60d4-4ea8-be2b-097a562d4b2f-machine-approver-tls\") 
pod \"machine-approver-56656f9798-xnvrp\" (UID: \"e0750137-60d4-4ea8-be2b-097a562d4b2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xnvrp" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627803 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9f8678ea-08b9-4dcd-b70d-e19800c697e3-serving-cert\") pod \"console-operator-58897d9998-6gb62\" (UID: \"9f8678ea-08b9-4dcd-b70d-e19800c697e3\") " pod="openshift-console-operator/console-operator-58897d9998-6gb62" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627819 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9c2451a-27de-43d2-a9e8-90f33ab30ce1-config\") pod \"etcd-operator-b45778765-68pmb\" (UID: \"e9c2451a-27de-43d2-a9e8-90f33ab30ce1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627838 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/31a14c62-0956-4d44-98f4-724da42f4e78-available-featuregates\") pod \"openshift-config-operator-7777fb866f-fq9w8\" (UID: \"31a14c62-0956-4d44-98f4-724da42f4e78\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fq9w8" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627855 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6202d20a-5377-4876-a2cb-700e4b0ccf60-config\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627870 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwzbp\" (UniqueName: \"kubernetes.io/projected/6202d20a-5377-4876-a2cb-700e4b0ccf60-kube-api-access-wwzbp\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627885 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5cffffee-7f08-4c94-bf30-15419e5e91e2-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627902 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627918 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5cffffee-7f08-4c94-bf30-15419e5e91e2-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627933 4687 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f8521f89-513c-41da-897e-9ea6fc278c21-images\") pod \"machine-config-operator-74547568cd-9r7dh\" (UID: \"f8521f89-513c-41da-897e-9ea6fc278c21\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-9r7dh" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627949 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gh26m\" (UniqueName: \"kubernetes.io/projected/f8521f89-513c-41da-897e-9ea6fc278c21-kube-api-access-gh26m\") pod \"machine-config-operator-74547568cd-9r7dh\" (UID: \"f8521f89-513c-41da-897e-9ea6fc278c21\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-9r7dh" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627970 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f9bd19fb-a226-460a-8164-5538673a3783-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-c27v5\" (UID: \"f9bd19fb-a226-460a-8164-5538673a3783\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-c27v5" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.627984 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/31a14c62-0956-4d44-98f4-724da42f4e78-serving-cert\") pod \"openshift-config-operator-7777fb866f-fq9w8\" (UID: \"31a14c62-0956-4d44-98f4-724da42f4e78\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fq9w8" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.628001 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e9c2451a-27de-43d2-a9e8-90f33ab30ce1-etcd-client\") pod \"etcd-operator-b45778765-68pmb\" (UID: \"e9c2451a-27de-43d2-a9e8-90f33ab30ce1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.628016 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f8521f89-513c-41da-897e-9ea6fc278c21-proxy-tls\") pod \"machine-config-operator-74547568cd-9r7dh\" (UID: \"f8521f89-513c-41da-897e-9ea6fc278c21\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-9r7dh" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.628018 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5cffffee-7f08-4c94-bf30-15419e5e91e2-audit-dir\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.628653 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0750137-60d4-4ea8-be2b-097a562d4b2f-config\") pod \"machine-approver-56656f9798-xnvrp\" (UID: \"e0750137-60d4-4ea8-be2b-097a562d4b2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xnvrp" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.629221 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-service-ca\") pod \"console-f9d7485db-zgglt\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.630046 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5cffffee-7f08-4c94-bf30-15419e5e91e2-audit-policies\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.630548 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-audit-policies\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.631421 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.631483 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/69faad41-a827-4fd3-b43e-036297dc2c9f-audit-dir\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.631810 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.632096 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5cffffee-7f08-4c94-bf30-15419e5e91e2-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.632104 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-console-config\") pod \"console-f9d7485db-zgglt\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.632619 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9f8678ea-08b9-4dcd-b70d-e19800c697e3-trusted-ca\") pod \"console-operator-58897d9998-6gb62\" (UID: \"9f8678ea-08b9-4dcd-b70d-e19800c697e3\") " pod="openshift-console-operator/console-operator-58897d9998-6gb62" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.632887 4687 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/31a14c62-0956-4d44-98f4-724da42f4e78-available-featuregates\") pod \"openshift-config-operator-7777fb866f-fq9w8\" (UID: \"31a14c62-0956-4d44-98f4-724da42f4e78\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fq9w8" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.633079 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgkfv"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.633145 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgs76"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.633162 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-v6xqm"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.633342 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e8238e53-faf8-4dc1-a726-76368f0319be-client-ca\") pod \"controller-manager-879f6c89f-2qjbv\" (UID: \"e8238e53-faf8-4dc1-a726-76368f0319be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.633738 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5cffffee-7f08-4c94-bf30-15419e5e91e2-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.633766 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e8238e53-faf8-4dc1-a726-76368f0319be-serving-cert\") pod \"controller-manager-879f6c89f-2qjbv\" (UID: \"e8238e53-faf8-4dc1-a726-76368f0319be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.634374 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6202d20a-5377-4876-a2cb-700e4b0ccf60-config\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.635039 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/31a14c62-0956-4d44-98f4-724da42f4e78-serving-cert\") pod \"openshift-config-operator-7777fb866f-fq9w8\" (UID: \"31a14c62-0956-4d44-98f4-724da42f4e78\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fq9w8" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.635105 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/efdd22a2-19ab-40f6-8a0d-fdbfdc3dbff3-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-6qjt6\" (UID: \"efdd22a2-19ab-40f6-8a0d-fdbfdc3dbff3\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6qjt6" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.635149 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.636075 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e8238e53-faf8-4dc1-a726-76368f0319be-config\") pod \"controller-manager-879f6c89f-2qjbv\" (UID: \"e8238e53-faf8-4dc1-a726-76368f0319be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.636703 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5cffffee-7f08-4c94-bf30-15419e5e91e2-serving-cert\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.636703 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.636819 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fbe26cf0-9829-4e16-b4c6-24484b1e678a-console-oauth-config\") pod \"console-f9d7485db-zgglt\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.628034 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbx4j\" (UniqueName: \"kubernetes.io/projected/efdd22a2-19ab-40f6-8a0d-fdbfdc3dbff3-kube-api-access-bbx4j\") pod \"cluster-samples-operator-665b6dd947-6qjt6\" (UID: \"efdd22a2-19ab-40f6-8a0d-fdbfdc3dbff3\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6qjt6" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.636902 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/f9bd19fb-a226-460a-8164-5538673a3783-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-c27v5\" (UID: \"f9bd19fb-a226-460a-8164-5538673a3783\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-c27v5" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.636931 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92pzs\" (UniqueName: \"kubernetes.io/projected/a3ec0e37-3e46-44e2-97ac-e7a81b8255b5-kube-api-access-92pzs\") pod \"dns-operator-744455d44c-5jnmz\" (UID: \"a3ec0e37-3e46-44e2-97ac-e7a81b8255b5\") " pod="openshift-dns-operator/dns-operator-744455d44c-5jnmz" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.637009 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f9bd19fb-a226-460a-8164-5538673a3783-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-c27v5\" (UID: 
\"f9bd19fb-a226-460a-8164-5538673a3783\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-c27v5" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.637032 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skrpq\" (UniqueName: \"kubernetes.io/projected/f9bd19fb-a226-460a-8164-5538673a3783-kube-api-access-skrpq\") pod \"cluster-image-registry-operator-dc59b4c8b-c27v5\" (UID: \"f9bd19fb-a226-460a-8164-5538673a3783\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-c27v5" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.637055 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.637218 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.637255 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwpbx\" (UniqueName: \"kubernetes.io/projected/fbe26cf0-9829-4e16-b4c6-24484b1e678a-kube-api-access-cwpbx\") pod \"console-f9d7485db-zgglt\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.637300 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sprbc\" (UniqueName: \"kubernetes.io/projected/0a45ab31-45db-4069-8da2-4c53cd2689ca-kube-api-access-sprbc\") pod \"route-controller-manager-6576b87f9c-hqtxf\" (UID: \"0a45ab31-45db-4069-8da2-4c53cd2689ca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.637320 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/6202d20a-5377-4876-a2cb-700e4b0ccf60-node-pullsecrets\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.637336 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/6202d20a-5377-4876-a2cb-700e4b0ccf60-audit\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.637352 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 
25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.637370 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-trusted-ca-bundle\") pod \"console-f9d7485db-zgglt\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.637366 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25aecd46-36d8-4ee9-bae5-4731e91b5e74-config\") pod \"machine-api-operator-5694c8668f-v9zjk\" (UID: \"25aecd46-36d8-4ee9-bae5-4731e91b5e74\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v9zjk" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.637388 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-oauth-serving-cert\") pod \"console-f9d7485db-zgglt\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.637444 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.637478 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/25aecd46-36d8-4ee9-bae5-4731e91b5e74-images\") pod \"machine-api-operator-5694c8668f-v9zjk\" (UID: \"25aecd46-36d8-4ee9-bae5-4731e91b5e74\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v9zjk" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.637510 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0a45ab31-45db-4069-8da2-4c53cd2689ca-serving-cert\") pod \"route-controller-manager-6576b87f9c-hqtxf\" (UID: \"0a45ab31-45db-4069-8da2-4c53cd2689ca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.637526 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9kvgj\" (UniqueName: \"kubernetes.io/projected/e0750137-60d4-4ea8-be2b-097a562d4b2f-kube-api-access-9kvgj\") pod \"machine-approver-56656f9798-xnvrp\" (UID: \"e0750137-60d4-4ea8-be2b-097a562d4b2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xnvrp" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.638536 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0a45ab31-45db-4069-8da2-4c53cd2689ca-client-ca\") pod \"route-controller-manager-6576b87f9c-hqtxf\" (UID: \"0a45ab31-45db-4069-8da2-4c53cd2689ca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.637555 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/6202d20a-5377-4876-a2cb-700e4b0ccf60-trusted-ca-bundle\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.639220 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvbrm\" (UniqueName: \"kubernetes.io/projected/9f8678ea-08b9-4dcd-b70d-e19800c697e3-kube-api-access-vvbrm\") pod \"console-operator-58897d9998-6gb62\" (UID: \"9f8678ea-08b9-4dcd-b70d-e19800c697e3\") " pod="openshift-console-operator/console-operator-58897d9998-6gb62" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.639300 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fbe26cf0-9829-4e16-b4c6-24484b1e678a-console-serving-cert\") pod \"console-f9d7485db-zgglt\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.639375 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6202d20a-5377-4876-a2cb-700e4b0ccf60-etcd-serving-ca\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.639626 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.639710 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35177658-25cb-4a51-a14e-5fb925283ac8-config\") pod \"openshift-apiserver-operator-796bbdcf4f-bskt8\" (UID: \"35177658-25cb-4a51-a14e-5fb925283ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bskt8" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.639779 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9c2451a-27de-43d2-a9e8-90f33ab30ce1-serving-cert\") pod \"etcd-operator-b45778765-68pmb\" (UID: \"e9c2451a-27de-43d2-a9e8-90f33ab30ce1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.640824 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.640901 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a45ab31-45db-4069-8da2-4c53cd2689ca-config\") pod \"route-controller-manager-6576b87f9c-hqtxf\" (UID: \"0a45ab31-45db-4069-8da2-4c53cd2689ca\") " 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.640981 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/6202d20a-5377-4876-a2cb-700e4b0ccf60-image-import-ca\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.641053 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f8678ea-08b9-4dcd-b70d-e19800c697e3-config\") pod \"console-operator-58897d9998-6gb62\" (UID: \"9f8678ea-08b9-4dcd-b70d-e19800c697e3\") " pod="openshift-console-operator/console-operator-58897d9998-6gb62" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.641128 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.641201 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a3ec0e37-3e46-44e2-97ac-e7a81b8255b5-metrics-tls\") pod \"dns-operator-744455d44c-5jnmz\" (UID: \"a3ec0e37-3e46-44e2-97ac-e7a81b8255b5\") " pod="openshift-dns-operator/dns-operator-744455d44c-5jnmz" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.641274 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/b307de3a-d363-4077-933e-68f51ae40158-signing-cabundle\") pod \"service-ca-9c57cc56f-vftf5\" (UID: \"b307de3a-d363-4077-933e-68f51ae40158\") " pod="openshift-service-ca/service-ca-9c57cc56f-vftf5" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.642473 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.643110 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f9bd19fb-a226-460a-8164-5538673a3783-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-c27v5\" (UID: \"f9bd19fb-a226-460a-8164-5538673a3783\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-c27v5" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.639855 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6202d20a-5377-4876-a2cb-700e4b0ccf60-trusted-ca-bundle\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.646381 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6202d20a-5377-4876-a2cb-700e4b0ccf60-audit-dir\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 
crc kubenswrapper[4687]: I1125 09:05:54.646653 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/e0750137-60d4-4ea8-be2b-097a562d4b2f-machine-approver-tls\") pod \"machine-approver-56656f9798-xnvrp\" (UID: \"e0750137-60d4-4ea8-be2b-097a562d4b2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xnvrp" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.640217 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/6202d20a-5377-4876-a2cb-700e4b0ccf60-node-pullsecrets\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.647008 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6202d20a-5377-4876-a2cb-700e4b0ccf60-etcd-serving-ca\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.647033 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-oauth-serving-cert\") pod \"console-f9d7485db-zgglt\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.647408 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.647540 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/6202d20a-5377-4876-a2cb-700e4b0ccf60-audit\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.640083 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e8238e53-faf8-4dc1-a726-76368f0319be-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-2qjbv\" (UID: \"e8238e53-faf8-4dc1-a726-76368f0319be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.648100 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.648214 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35177658-25cb-4a51-a14e-5fb925283ac8-config\") pod \"openshift-apiserver-operator-796bbdcf4f-bskt8\" (UID: \"35177658-25cb-4a51-a14e-5fb925283ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bskt8" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.648821 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.649629 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/25aecd46-36d8-4ee9-bae5-4731e91b5e74-images\") pod \"machine-api-operator-5694c8668f-v9zjk\" (UID: \"25aecd46-36d8-4ee9-bae5-4731e91b5e74\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v9zjk" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.649660 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-trusted-ca-bundle\") pod \"console-f9d7485db-zgglt\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.649753 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e0750137-60d4-4ea8-be2b-097a562d4b2f-auth-proxy-config\") pod \"machine-approver-56656f9798-xnvrp\" (UID: \"e0750137-60d4-4ea8-be2b-097a562d4b2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xnvrp" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.649764 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/6202d20a-5377-4876-a2cb-700e4b0ccf60-image-import-ca\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.649880 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a45ab31-45db-4069-8da2-4c53cd2689ca-config\") pod \"route-controller-manager-6576b87f9c-hqtxf\" (UID: \"0a45ab31-45db-4069-8da2-4c53cd2689ca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.650042 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f8678ea-08b9-4dcd-b70d-e19800c697e3-config\") pod \"console-operator-58897d9998-6gb62\" (UID: \"9f8678ea-08b9-4dcd-b70d-e19800c697e3\") " pod="openshift-console-operator/console-operator-58897d9998-6gb62" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.650450 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-xmxvb"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.651037 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fbe26cf0-9829-4e16-b4c6-24484b1e678a-console-serving-cert\") pod \"console-f9d7485db-zgglt\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.651328 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-template-login\") pod 
\"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.651470 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.651582 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0a45ab31-45db-4069-8da2-4c53cd2689ca-serving-cert\") pod \"route-controller-manager-6576b87f9c-hqtxf\" (UID: \"0a45ab31-45db-4069-8da2-4c53cd2689ca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.652040 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-mclwx"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.652235 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.653544 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/35177658-25cb-4a51-a14e-5fb925283ac8-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-bskt8\" (UID: \"35177658-25cb-4a51-a14e-5fb925283ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bskt8" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.653586 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6202d20a-5377-4876-a2cb-700e4b0ccf60-etcd-client\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.653713 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/25aecd46-36d8-4ee9-bae5-4731e91b5e74-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-v9zjk\" (UID: \"25aecd46-36d8-4ee9-bae5-4731e91b5e74\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v9zjk" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.653966 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5cffffee-7f08-4c94-bf30-15419e5e91e2-etcd-client\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.654118 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9f8678ea-08b9-4dcd-b70d-e19800c697e3-serving-cert\") pod \"console-operator-58897d9998-6gb62\" 
(UID: \"9f8678ea-08b9-4dcd-b70d-e19800c697e3\") " pod="openshift-console-operator/console-operator-58897d9998-6gb62" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.654430 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6202d20a-5377-4876-a2cb-700e4b0ccf60-encryption-config\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.654570 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6202d20a-5377-4876-a2cb-700e4b0ccf60-serving-cert\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.654923 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5cffffee-7f08-4c94-bf30-15419e5e91e2-encryption-config\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.655216 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/f9bd19fb-a226-460a-8164-5538673a3783-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-c27v5\" (UID: \"f9bd19fb-a226-460a-8164-5538673a3783\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-c27v5" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.655359 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.655740 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-76gwh"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.656061 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.657045 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-jc6zc"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.657808 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-jc6zc" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.658452 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-c7v75"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.661414 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-9r7dh"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.661442 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zcrgz"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.662670 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vtn5b"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.663899 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-tw4q9"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.664112 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-68pmb"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.666863 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-8x7wj"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.667045 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.669064 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vftf5"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.669638 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-cpbmq"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.669695 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.672773 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.674306 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-cx6sn"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.675683 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.678168 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-cx6sn"] Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.686280 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.706578 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.726017 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.742091 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9c2451a-27de-43d2-a9e8-90f33ab30ce1-serving-cert\") pod \"etcd-operator-b45778765-68pmb\" (UID: \"e9c2451a-27de-43d2-a9e8-90f33ab30ce1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.742243 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a3ec0e37-3e46-44e2-97ac-e7a81b8255b5-metrics-tls\") pod \"dns-operator-744455d44c-5jnmz\" (UID: \"a3ec0e37-3e46-44e2-97ac-e7a81b8255b5\") " pod="openshift-dns-operator/dns-operator-744455d44c-5jnmz" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.742354 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/b307de3a-d363-4077-933e-68f51ae40158-signing-cabundle\") pod \"service-ca-9c57cc56f-vftf5\" (UID: \"b307de3a-d363-4077-933e-68f51ae40158\") " pod="openshift-service-ca/service-ca-9c57cc56f-vftf5" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.742594 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/b307de3a-d363-4077-933e-68f51ae40158-signing-key\") pod \"service-ca-9c57cc56f-vftf5\" (UID: \"b307de3a-d363-4077-933e-68f51ae40158\") " pod="openshift-service-ca/service-ca-9c57cc56f-vftf5" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.742727 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5w85g\" (UniqueName: \"kubernetes.io/projected/b307de3a-d363-4077-933e-68f51ae40158-kube-api-access-5w85g\") pod \"service-ca-9c57cc56f-vftf5\" (UID: \"b307de3a-d363-4077-933e-68f51ae40158\") " pod="openshift-service-ca/service-ca-9c57cc56f-vftf5" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.743107 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/e9c2451a-27de-43d2-a9e8-90f33ab30ce1-etcd-service-ca\") pod \"etcd-operator-b45778765-68pmb\" (UID: \"e9c2451a-27de-43d2-a9e8-90f33ab30ce1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.744197 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/e9c2451a-27de-43d2-a9e8-90f33ab30ce1-etcd-service-ca\") pod 
\"etcd-operator-b45778765-68pmb\" (UID: \"e9c2451a-27de-43d2-a9e8-90f33ab30ce1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.745430 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f8521f89-513c-41da-897e-9ea6fc278c21-auth-proxy-config\") pod \"machine-config-operator-74547568cd-9r7dh\" (UID: \"f8521f89-513c-41da-897e-9ea6fc278c21\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-9r7dh" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.745568 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f8521f89-513c-41da-897e-9ea6fc278c21-auth-proxy-config\") pod \"machine-config-operator-74547568cd-9r7dh\" (UID: \"f8521f89-513c-41da-897e-9ea6fc278c21\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-9r7dh" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.745757 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/e9c2451a-27de-43d2-a9e8-90f33ab30ce1-etcd-ca\") pod \"etcd-operator-b45778765-68pmb\" (UID: \"e9c2451a-27de-43d2-a9e8-90f33ab30ce1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.746073 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtlpk\" (UniqueName: \"kubernetes.io/projected/e9c2451a-27de-43d2-a9e8-90f33ab30ce1-kube-api-access-qtlpk\") pod \"etcd-operator-b45778765-68pmb\" (UID: \"e9c2451a-27de-43d2-a9e8-90f33ab30ce1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.746162 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9c2451a-27de-43d2-a9e8-90f33ab30ce1-config\") pod \"etcd-operator-b45778765-68pmb\" (UID: \"e9c2451a-27de-43d2-a9e8-90f33ab30ce1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.746249 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f8521f89-513c-41da-897e-9ea6fc278c21-images\") pod \"machine-config-operator-74547568cd-9r7dh\" (UID: \"f8521f89-513c-41da-897e-9ea6fc278c21\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-9r7dh" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.746325 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gh26m\" (UniqueName: \"kubernetes.io/projected/f8521f89-513c-41da-897e-9ea6fc278c21-kube-api-access-gh26m\") pod \"machine-config-operator-74547568cd-9r7dh\" (UID: \"f8521f89-513c-41da-897e-9ea6fc278c21\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-9r7dh" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.746410 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e9c2451a-27de-43d2-a9e8-90f33ab30ce1-etcd-client\") pod \"etcd-operator-b45778765-68pmb\" (UID: \"e9c2451a-27de-43d2-a9e8-90f33ab30ce1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.746779 
4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f8521f89-513c-41da-897e-9ea6fc278c21-proxy-tls\") pod \"machine-config-operator-74547568cd-9r7dh\" (UID: \"f8521f89-513c-41da-897e-9ea6fc278c21\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-9r7dh" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.746867 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92pzs\" (UniqueName: \"kubernetes.io/projected/a3ec0e37-3e46-44e2-97ac-e7a81b8255b5-kube-api-access-92pzs\") pod \"dns-operator-744455d44c-5jnmz\" (UID: \"a3ec0e37-3e46-44e2-97ac-e7a81b8255b5\") " pod="openshift-dns-operator/dns-operator-744455d44c-5jnmz" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.747228 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/e9c2451a-27de-43d2-a9e8-90f33ab30ce1-etcd-ca\") pod \"etcd-operator-b45778765-68pmb\" (UID: \"e9c2451a-27de-43d2-a9e8-90f33ab30ce1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.747449 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9c2451a-27de-43d2-a9e8-90f33ab30ce1-config\") pod \"etcd-operator-b45778765-68pmb\" (UID: \"e9c2451a-27de-43d2-a9e8-90f33ab30ce1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.752382 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a3ec0e37-3e46-44e2-97ac-e7a81b8255b5-metrics-tls\") pod \"dns-operator-744455d44c-5jnmz\" (UID: \"a3ec0e37-3e46-44e2-97ac-e7a81b8255b5\") " pod="openshift-dns-operator/dns-operator-744455d44c-5jnmz" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.752470 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.752709 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9c2451a-27de-43d2-a9e8-90f33ab30ce1-serving-cert\") pod \"etcd-operator-b45778765-68pmb\" (UID: \"e9c2451a-27de-43d2-a9e8-90f33ab30ce1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.758370 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e9c2451a-27de-43d2-a9e8-90f33ab30ce1-etcd-client\") pod \"etcd-operator-b45778765-68pmb\" (UID: \"e9c2451a-27de-43d2-a9e8-90f33ab30ce1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.766097 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.785958 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.806343 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 25 
09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.826238 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.846147 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.865573 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.906348 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.911733 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.929097 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.945667 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.966533 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 25 09:05:54 crc kubenswrapper[4687]: I1125 09:05:54.985908 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.006458 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.026698 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.067114 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.086758 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.106758 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.126081 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.145605 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.166349 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.185928 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 25 09:05:55 crc 
kubenswrapper[4687]: I1125 09:05:55.187987 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f8521f89-513c-41da-897e-9ea6fc278c21-images\") pod \"machine-config-operator-74547568cd-9r7dh\" (UID: \"f8521f89-513c-41da-897e-9ea6fc278c21\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-9r7dh" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.206068 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.225592 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.232291 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f8521f89-513c-41da-897e-9ea6fc278c21-proxy-tls\") pod \"machine-config-operator-74547568cd-9r7dh\" (UID: \"f8521f89-513c-41da-897e-9ea6fc278c21\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-9r7dh" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.247239 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.267025 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.286529 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.306942 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.326343 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.345908 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.367073 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.386011 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.406216 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.427601 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.446313 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.467341 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.485741 4687 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.507065 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.524444 4687 request.go:700] Waited for 1.014408163s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-storage-version-migrator-operator/secrets?fieldSelector=metadata.name%3Dserving-cert&limit=500&resourceVersion=0 Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.526388 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.546305 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.566810 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.586523 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.605542 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.626911 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.660920 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.665445 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.686929 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.707222 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.726317 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 25 09:05:55 crc kubenswrapper[4687]: E1125 09:05:55.743311 4687 secret.go:188] Couldn't get secret openshift-service-ca/signing-key: failed to sync secret cache: timed out waiting for the condition Nov 25 09:05:55 crc kubenswrapper[4687]: E1125 09:05:55.743374 4687 configmap.go:193] Couldn't get configMap openshift-service-ca/signing-cabundle: failed to sync configmap cache: timed out waiting for the condition Nov 25 09:05:55 crc kubenswrapper[4687]: E1125 09:05:55.743422 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b307de3a-d363-4077-933e-68f51ae40158-signing-key podName:b307de3a-d363-4077-933e-68f51ae40158 nodeName:}" failed. 
No retries permitted until 2025-11-25 09:05:56.24339668 +0000 UTC m=+151.297036408 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/b307de3a-d363-4077-933e-68f51ae40158-signing-key") pod "service-ca-9c57cc56f-vftf5" (UID: "b307de3a-d363-4077-933e-68f51ae40158") : failed to sync secret cache: timed out waiting for the condition Nov 25 09:05:55 crc kubenswrapper[4687]: E1125 09:05:55.743448 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b307de3a-d363-4077-933e-68f51ae40158-signing-cabundle podName:b307de3a-d363-4077-933e-68f51ae40158 nodeName:}" failed. No retries permitted until 2025-11-25 09:05:56.243437851 +0000 UTC m=+151.297077589 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/b307de3a-d363-4077-933e-68f51ae40158-signing-cabundle") pod "service-ca-9c57cc56f-vftf5" (UID: "b307de3a-d363-4077-933e-68f51ae40158") : failed to sync configmap cache: timed out waiting for the condition Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.746206 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.766099 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.789896 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.806530 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.827203 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.846167 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.865845 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.885953 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.905806 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.926889 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.946285 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.967174 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 09:05:55 crc kubenswrapper[4687]: I1125 09:05:55.985964 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 25 09:05:56 crc 
kubenswrapper[4687]: I1125 09:05:56.007085 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.026865 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.047113 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.067612 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.086172 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.107219 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.147038 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.166836 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.186575 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.207315 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.227138 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.245633 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.264492 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/b307de3a-d363-4077-933e-68f51ae40158-signing-cabundle\") pod \"service-ca-9c57cc56f-vftf5\" (UID: \"b307de3a-d363-4077-933e-68f51ae40158\") " pod="openshift-service-ca/service-ca-9c57cc56f-vftf5" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.264578 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/b307de3a-d363-4077-933e-68f51ae40158-signing-key\") pod \"service-ca-9c57cc56f-vftf5\" (UID: \"b307de3a-d363-4077-933e-68f51ae40158\") " pod="openshift-service-ca/service-ca-9c57cc56f-vftf5" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.266105 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.266120 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/b307de3a-d363-4077-933e-68f51ae40158-signing-cabundle\") pod \"service-ca-9c57cc56f-vftf5\" (UID: \"b307de3a-d363-4077-933e-68f51ae40158\") " pod="openshift-service-ca/service-ca-9c57cc56f-vftf5" Nov 25 
09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.270303 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/b307de3a-d363-4077-933e-68f51ae40158-signing-key\") pod \"service-ca-9c57cc56f-vftf5\" (UID: \"b307de3a-d363-4077-933e-68f51ae40158\") " pod="openshift-service-ca/service-ca-9c57cc56f-vftf5"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.303046 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lc5r6\" (UniqueName: \"kubernetes.io/projected/057a2f29-f877-40e5-9a25-d1a2d26918ad-kube-api-access-lc5r6\") pod \"downloads-7954f5f757-xmxvb\" (UID: \"057a2f29-f877-40e5-9a25-d1a2d26918ad\") " pod="openshift-console/downloads-7954f5f757-xmxvb"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.329726 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbx4j\" (UniqueName: \"kubernetes.io/projected/efdd22a2-19ab-40f6-8a0d-fdbfdc3dbff3-kube-api-access-bbx4j\") pod \"cluster-samples-operator-665b6dd947-6qjt6\" (UID: \"efdd22a2-19ab-40f6-8a0d-fdbfdc3dbff3\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6qjt6"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.346313 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smzws\" (UniqueName: \"kubernetes.io/projected/69faad41-a827-4fd3-b43e-036297dc2c9f-kube-api-access-smzws\") pod \"oauth-openshift-558db77b4-c7v75\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " pod="openshift-authentication/oauth-openshift-558db77b4-c7v75"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.376862 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f9bd19fb-a226-460a-8164-5538673a3783-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-c27v5\" (UID: \"f9bd19fb-a226-460a-8164-5538673a3783\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-c27v5"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.389029 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwzbp\" (UniqueName: \"kubernetes.io/projected/6202d20a-5377-4876-a2cb-700e4b0ccf60-kube-api-access-wwzbp\") pod \"apiserver-76f77b778f-hwpb2\" (UID: \"6202d20a-5377-4876-a2cb-700e4b0ccf60\") " pod="openshift-apiserver/apiserver-76f77b778f-hwpb2"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.389861 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6qjt6"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.415401 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k859w\" (UniqueName: \"kubernetes.io/projected/35177658-25cb-4a51-a14e-5fb925283ac8-kube-api-access-k859w\") pod \"openshift-apiserver-operator-796bbdcf4f-bskt8\" (UID: \"35177658-25cb-4a51-a14e-5fb925283ac8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bskt8"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.427811 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jc7gm\" (UniqueName: \"kubernetes.io/projected/e8238e53-faf8-4dc1-a726-76368f0319be-kube-api-access-jc7gm\") pod \"controller-manager-879f6c89f-2qjbv\" (UID: \"e8238e53-faf8-4dc1-a726-76368f0319be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.428080 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-c7v75"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.451261 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wjlfh\" (UniqueName: \"kubernetes.io/projected/31a14c62-0956-4d44-98f4-724da42f4e78-kube-api-access-wjlfh\") pod \"openshift-config-operator-7777fb866f-fq9w8\" (UID: \"31a14c62-0956-4d44-98f4-724da42f4e78\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fq9w8"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.464960 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mltqm\" (UniqueName: \"kubernetes.io/projected/5cffffee-7f08-4c94-bf30-15419e5e91e2-kube-api-access-mltqm\") pod \"apiserver-7bbb656c7d-bvkgm\" (UID: \"5cffffee-7f08-4c94-bf30-15419e5e91e2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.483407 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25fbr\" (UniqueName: \"kubernetes.io/projected/25aecd46-36d8-4ee9-bae5-4731e91b5e74-kube-api-access-25fbr\") pod \"machine-api-operator-5694c8668f-v9zjk\" (UID: \"25aecd46-36d8-4ee9-bae5-4731e91b5e74\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v9zjk"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.495801 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fq9w8"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.500206 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-hwpb2"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.503621 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skrpq\" (UniqueName: \"kubernetes.io/projected/f9bd19fb-a226-460a-8164-5538673a3783-kube-api-access-skrpq\") pod \"cluster-image-registry-operator-dc59b4c8b-c27v5\" (UID: \"f9bd19fb-a226-460a-8164-5538673a3783\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-c27v5"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.503653 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-xmxvb"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.524869 4687 request.go:700] Waited for 1.877668505s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/serviceaccounts/console-operator/token
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.526774 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwpbx\" (UniqueName: \"kubernetes.io/projected/fbe26cf0-9829-4e16-b4c6-24484b1e678a-kube-api-access-cwpbx\") pod \"console-f9d7485db-zgglt\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " pod="openshift-console/console-f9d7485db-zgglt"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.532977 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.551157 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvbrm\" (UniqueName: \"kubernetes.io/projected/9f8678ea-08b9-4dcd-b70d-e19800c697e3-kube-api-access-vvbrm\") pod \"console-operator-58897d9998-6gb62\" (UID: \"9f8678ea-08b9-4dcd-b70d-e19800c697e3\") " pod="openshift-console-operator/console-operator-58897d9998-6gb62"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.564536 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sprbc\" (UniqueName: \"kubernetes.io/projected/0a45ab31-45db-4069-8da2-4c53cd2689ca-kube-api-access-sprbc\") pod \"route-controller-manager-6576b87f9c-hqtxf\" (UID: \"0a45ab31-45db-4069-8da2-4c53cd2689ca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.573752 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bskt8"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.586599 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9kvgj\" (UniqueName: \"kubernetes.io/projected/e0750137-60d4-4ea8-be2b-097a562d4b2f-kube-api-access-9kvgj\") pod \"machine-approver-56656f9798-xnvrp\" (UID: \"e0750137-60d4-4ea8-be2b-097a562d4b2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xnvrp"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.587019 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.601064 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6qjt6"]
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.606164 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.613913 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xnvrp"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.627944 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.635957 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-v9zjk"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.635981 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.640869 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-zgglt"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.649063 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.651544 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-c7v75"]
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.665538 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Nov 25 09:05:56 crc kubenswrapper[4687]: W1125 09:05:56.681832 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod69faad41_a827_4fd3_b43e_036297dc2c9f.slice/crio-6adad7d5702b7f3841a544aa3960d52c0f1eb70c482375eab20326e4b9768c82 WatchSource:0}: Error finding container 6adad7d5702b7f3841a544aa3960d52c0f1eb70c482375eab20326e4b9768c82: Status 404 returned error can't find the container with id 6adad7d5702b7f3841a544aa3960d52c0f1eb70c482375eab20326e4b9768c82
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.686027 4687 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.690883 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.706479 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-c27v5"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.723731 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5w85g\" (UniqueName: \"kubernetes.io/projected/b307de3a-d363-4077-933e-68f51ae40158-kube-api-access-5w85g\") pod \"service-ca-9c57cc56f-vftf5\" (UID: \"b307de3a-d363-4077-933e-68f51ae40158\") " pod="openshift-service-ca/service-ca-9c57cc56f-vftf5"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.743999 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtlpk\" (UniqueName: \"kubernetes.io/projected/e9c2451a-27de-43d2-a9e8-90f33ab30ce1-kube-api-access-qtlpk\") pod \"etcd-operator-b45778765-68pmb\" (UID: \"e9c2451a-27de-43d2-a9e8-90f33ab30ce1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.764368 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gh26m\" (UniqueName: \"kubernetes.io/projected/f8521f89-513c-41da-897e-9ea6fc278c21-kube-api-access-gh26m\") pod \"machine-config-operator-74547568cd-9r7dh\" (UID: \"f8521f89-513c-41da-897e-9ea6fc278c21\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-9r7dh"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.780705 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.787989 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92pzs\" (UniqueName: \"kubernetes.io/projected/a3ec0e37-3e46-44e2-97ac-e7a81b8255b5-kube-api-access-92pzs\") pod \"dns-operator-744455d44c-5jnmz\" (UID: \"a3ec0e37-3e46-44e2-97ac-e7a81b8255b5\") " pod="openshift-dns-operator/dns-operator-744455d44c-5jnmz"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.788481 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-fq9w8"]
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.795459 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-6gb62"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.810943 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-hwpb2"]
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.818967 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.833181 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-5jnmz"
Nov 25 09:05:56 crc kubenswrapper[4687]: W1125 09:05:56.851042 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6202d20a_5377_4876_a2cb_700e4b0ccf60.slice/crio-4aed2bcaa5571a1fd3ac99d2f12ef3532412c0de60864d24581ea1002cf0d8c1 WatchSource:0}: Error finding container 4aed2bcaa5571a1fd3ac99d2f12ef3532412c0de60864d24581ea1002cf0d8c1: Status 404 returned error can't find the container with id 4aed2bcaa5571a1fd3ac99d2f12ef3532412c0de60864d24581ea1002cf0d8c1
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.900628 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-xmxvb"]
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.901563 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/6dfd611b-2429-43f4-8ccb-9a5b2138a4df-profile-collector-cert\") pod \"catalog-operator-68c6474976-5tvvm\" (UID: \"6dfd611b-2429-43f4-8ccb-9a5b2138a4df\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5tvvm"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.901759 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/6dfd611b-2429-43f4-8ccb-9a5b2138a4df-srv-cert\") pod \"catalog-operator-68c6474976-5tvvm\" (UID: \"6dfd611b-2429-43f4-8ccb-9a5b2138a4df\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5tvvm"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.901864 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e26524b-c8ef-47d0-8a03-ee87cb3e06fe-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5jqcw\" (UID: \"3e26524b-c8ef-47d0-8a03-ee87cb3e06fe\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jqcw"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.901916 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5faad20b-1dd5-40df-8b0a-02890b547838-registry-tls\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.902074 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nz62p\" (UniqueName: \"kubernetes.io/projected/3f949ab7-cad8-4a34-b419-42a3dd61a4fc-kube-api-access-nz62p\") pod \"olm-operator-6b444d44fb-vtn5b\" (UID: \"3f949ab7-cad8-4a34-b419-42a3dd61a4fc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vtn5b"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.902431 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/24cfae3a-2b46-4e2e-93ec-7e7dc089af87-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-zcrgz\" (UID: \"24cfae3a-2b46-4e2e-93ec-7e7dc089af87\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zcrgz"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.902562 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vksl\" (UniqueName: \"kubernetes.io/projected/9aee268c-0a3a-4f5b-8449-c71d027e9d97-kube-api-access-8vksl\") pod \"router-default-5444994796-p2nkd\" (UID: \"9aee268c-0a3a-4f5b-8449-c71d027e9d97\") " pod="openshift-ingress/router-default-5444994796-p2nkd"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.902847 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3f949ab7-cad8-4a34-b419-42a3dd61a4fc-srv-cert\") pod \"olm-operator-6b444d44fb-vtn5b\" (UID: \"3f949ab7-cad8-4a34-b419-42a3dd61a4fc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vtn5b"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.903091 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8172038a-a448-40d0-834a-6f059d3f7738-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-v6xqm\" (UID: \"8172038a-a448-40d0-834a-6f059d3f7738\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v6xqm"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.905765 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e34299cb-9872-41f2-b75b-5cfc1d309a56-config\") pod \"kube-apiserver-operator-766d6c64bb-qgkfv\" (UID: \"e34299cb-9872-41f2-b75b-5cfc1d309a56\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgkfv"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.905862 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmpnr\" (UniqueName: \"kubernetes.io/projected/9c3bce2f-351a-4a20-95d0-404b4c117ee3-kube-api-access-lmpnr\") pod \"machine-config-controller-84d6567774-ntss9\" (UID: \"9c3bce2f-351a-4a20-95d0-404b4c117ee3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntss9"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.905934 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4cf28713-ccae-4b06-bdb6-f52d0426ac47-serving-cert\") pod \"authentication-operator-69f744f599-76gwh\" (UID: \"4cf28713-ccae-4b06-bdb6-f52d0426ac47\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-76gwh"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.905967 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24cfae3a-2b46-4e2e-93ec-7e7dc089af87-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-zcrgz\" (UID: \"24cfae3a-2b46-4e2e-93ec-7e7dc089af87\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zcrgz"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.905994 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9aee268c-0a3a-4f5b-8449-c71d027e9d97-service-ca-bundle\") pod \"router-default-5444994796-p2nkd\" (UID: \"9aee268c-0a3a-4f5b-8449-c71d027e9d97\") " pod="openshift-ingress/router-default-5444994796-p2nkd"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.906038 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/cca1b796-de4d-40d2-ab8b-de2fa132f859-metrics-tls\") pod \"ingress-operator-5b745b69d9-mclwx\" (UID: \"cca1b796-de4d-40d2-ab8b-de2fa132f859\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mclwx"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.906068 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b1bbe405-c2f4-40b2-9569-486ba688d2d2-serving-cert\") pod \"service-ca-operator-777779d784-8x7wj\" (UID: \"b1bbe405-c2f4-40b2-9569-486ba688d2d2\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8x7wj"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.906100 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5faad20b-1dd5-40df-8b0a-02890b547838-installation-pull-secrets\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.906187 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e26524b-c8ef-47d0-8a03-ee87cb3e06fe-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5jqcw\" (UID: \"3e26524b-c8ef-47d0-8a03-ee87cb3e06fe\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jqcw"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.906560 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e34299cb-9872-41f2-b75b-5cfc1d309a56-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-qgkfv\" (UID: \"e34299cb-9872-41f2-b75b-5cfc1d309a56\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgkfv"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.906590 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4cf28713-ccae-4b06-bdb6-f52d0426ac47-config\") pod \"authentication-operator-69f744f599-76gwh\" (UID: \"4cf28713-ccae-4b06-bdb6-f52d0426ac47\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-76gwh"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.906671 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3e26524b-c8ef-47d0-8a03-ee87cb3e06fe-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5jqcw\" (UID: \"3e26524b-c8ef-47d0-8a03-ee87cb3e06fe\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jqcw"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.909096 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2qjbv"]
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.911090 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/3f949ab7-cad8-4a34-b419-42a3dd61a4fc-profile-collector-cert\") pod \"olm-operator-6b444d44fb-vtn5b\" (UID: \"3f949ab7-cad8-4a34-b419-42a3dd61a4fc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vtn5b"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.911209 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1bbe405-c2f4-40b2-9569-486ba688d2d2-config\") pod \"service-ca-operator-777779d784-8x7wj\" (UID: \"b1bbe405-c2f4-40b2-9569-486ba688d2d2\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8x7wj"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.911349 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5faad20b-1dd5-40df-8b0a-02890b547838-bound-sa-token\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.911436 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5faad20b-1dd5-40df-8b0a-02890b547838-trusted-ca\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.911461 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ct98d\" (UniqueName: \"kubernetes.io/projected/5faad20b-1dd5-40df-8b0a-02890b547838-kube-api-access-ct98d\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.911491 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cca1b796-de4d-40d2-ab8b-de2fa132f859-bound-sa-token\") pod \"ingress-operator-5b745b69d9-mclwx\" (UID: \"cca1b796-de4d-40d2-ab8b-de2fa132f859\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mclwx"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.911523 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3c01bb95-1d19-435a-9090-da58d2110922-config-volume\") pod \"collect-profiles-29401020-kvfxw\" (UID: \"3c01bb95-1d19-435a-9090-da58d2110922\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.913485 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cca1b796-de4d-40d2-ab8b-de2fa132f859-trusted-ca\") pod \"ingress-operator-5b745b69d9-mclwx\" (UID: \"cca1b796-de4d-40d2-ab8b-de2fa132f859\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mclwx"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.913666 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/996f4a60-505f-425c-ac79-c8df8033ed57-webhook-cert\") pod \"packageserver-d55dfcdfc-zzgvx\" (UID: \"996f4a60-505f-425c-ac79-c8df8033ed57\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx"
\"packageserver-d55dfcdfc-zzgvx\" (UID: \"996f4a60-505f-425c-ac79-c8df8033ed57\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.913705 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e34299cb-9872-41f2-b75b-5cfc1d309a56-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-qgkfv\" (UID: \"e34299cb-9872-41f2-b75b-5cfc1d309a56\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgkfv" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.913742 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzxzp\" (UniqueName: \"kubernetes.io/projected/74dce578-27e9-4dc2-ac45-9019de15d559-kube-api-access-kzxzp\") pod \"control-plane-machine-set-operator-78cbb6b69f-j4vxg\" (UID: \"74dce578-27e9-4dc2-ac45-9019de15d559\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-j4vxg" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.913823 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.914882 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m922l\" (UniqueName: \"kubernetes.io/projected/4cf28713-ccae-4b06-bdb6-f52d0426ac47-kube-api-access-m922l\") pod \"authentication-operator-69f744f599-76gwh\" (UID: \"4cf28713-ccae-4b06-bdb6-f52d0426ac47\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-76gwh" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.914941 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7566f9bb-44c5-4c74-b1d9-d8c1e3c61206-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-zdldw\" (UID: \"7566f9bb-44c5-4c74-b1d9-d8c1e3c61206\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zdldw" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.914994 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d31aac44-f947-4eae-811c-9c0822a157d0-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-gm5pk\" (UID: \"d31aac44-f947-4eae-811c-9c0822a157d0\") " pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.915036 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/9aee268c-0a3a-4f5b-8449-c71d027e9d97-stats-auth\") pod \"router-default-5444994796-p2nkd\" (UID: \"9aee268c-0a3a-4f5b-8449-c71d027e9d97\") " pod="openshift-ingress/router-default-5444994796-p2nkd" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.915143 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-cw8v2\" (UniqueName: \"kubernetes.io/projected/b1bbe405-c2f4-40b2-9569-486ba688d2d2-kube-api-access-cw8v2\") pod \"service-ca-operator-777779d784-8x7wj\" (UID: \"b1bbe405-c2f4-40b2-9569-486ba688d2d2\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8x7wj" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.915173 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fhx2\" (UniqueName: \"kubernetes.io/projected/24cfae3a-2b46-4e2e-93ec-7e7dc089af87-kube-api-access-7fhx2\") pod \"kube-storage-version-migrator-operator-b67b599dd-zcrgz\" (UID: \"24cfae3a-2b46-4e2e-93ec-7e7dc089af87\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zcrgz" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.915238 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qw9j6\" (UniqueName: \"kubernetes.io/projected/b968a427-bae8-41af-a3b7-1ef108cefb0d-kube-api-access-qw9j6\") pod \"package-server-manager-789f6589d5-ztsnq\" (UID: \"b968a427-bae8-41af-a3b7-1ef108cefb0d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ztsnq" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.915380 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/996f4a60-505f-425c-ac79-c8df8033ed57-apiservice-cert\") pod \"packageserver-d55dfcdfc-zzgvx\" (UID: \"996f4a60-505f-425c-ac79-c8df8033ed57\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.915417 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28v78\" (UniqueName: \"kubernetes.io/projected/6dfd611b-2429-43f4-8ccb-9a5b2138a4df-kube-api-access-28v78\") pod \"catalog-operator-68c6474976-5tvvm\" (UID: \"6dfd611b-2429-43f4-8ccb-9a5b2138a4df\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5tvvm" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.915440 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/76160710-34d0-4e08-8e47-9c61a730db60-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-bgs76\" (UID: \"76160710-34d0-4e08-8e47-9c61a730db60\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgs76" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.915594 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7566f9bb-44c5-4c74-b1d9-d8c1e3c61206-config\") pod \"kube-controller-manager-operator-78b949d7b-zdldw\" (UID: \"7566f9bb-44c5-4c74-b1d9-d8c1e3c61206\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zdldw" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.915632 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76160710-34d0-4e08-8e47-9c61a730db60-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-bgs76\" (UID: \"76160710-34d0-4e08-8e47-9c61a730db60\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgs76" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.915666 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9aee268c-0a3a-4f5b-8449-c71d027e9d97-metrics-certs\") pod \"router-default-5444994796-p2nkd\" (UID: \"9aee268c-0a3a-4f5b-8449-c71d027e9d97\") " pod="openshift-ingress/router-default-5444994796-p2nkd" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.915711 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d31aac44-f947-4eae-811c-9c0822a157d0-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-gm5pk\" (UID: \"d31aac44-f947-4eae-811c-9c0822a157d0\") " pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk" Nov 25 09:05:56 crc kubenswrapper[4687]: E1125 09:05:56.916899 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:05:57.416871451 +0000 UTC m=+152.470511169 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.917905 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5faad20b-1dd5-40df-8b0a-02890b547838-registry-certificates\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.919053 4687 util.go:30] "No sandbox for pod can be found. 
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.919995 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3c01bb95-1d19-435a-9090-da58d2110922-secret-volume\") pod \"collect-profiles-29401020-kvfxw\" (UID: \"3c01bb95-1d19-435a-9090-da58d2110922\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.920119 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4cf28713-ccae-4b06-bdb6-f52d0426ac47-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-76gwh\" (UID: \"4cf28713-ccae-4b06-bdb6-f52d0426ac47\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-76gwh"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.920166 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qfd7\" (UniqueName: \"kubernetes.io/projected/5de513b2-cac7-4fe5-b121-91bdb67abbd2-kube-api-access-2qfd7\") pod \"migrator-59844c95c7-kvhfg\" (UID: \"5de513b2-cac7-4fe5-b121-91bdb67abbd2\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-kvhfg"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.920228 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jsn5t\" (UniqueName: \"kubernetes.io/projected/cca1b796-de4d-40d2-ab8b-de2fa132f859-kube-api-access-jsn5t\") pod \"ingress-operator-5b745b69d9-mclwx\" (UID: \"cca1b796-de4d-40d2-ab8b-de2fa132f859\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mclwx"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.921709 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/74dce578-27e9-4dc2-ac45-9019de15d559-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-j4vxg\" (UID: \"74dce578-27e9-4dc2-ac45-9019de15d559\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-j4vxg"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.922329 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smvsb\" (UniqueName: \"kubernetes.io/projected/3c01bb95-1d19-435a-9090-da58d2110922-kube-api-access-smvsb\") pod \"collect-profiles-29401020-kvfxw\" (UID: \"3c01bb95-1d19-435a-9090-da58d2110922\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.923058 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9c3bce2f-351a-4a20-95d0-404b4c117ee3-proxy-tls\") pod \"machine-config-controller-84d6567774-ntss9\" (UID: \"9c3bce2f-351a-4a20-95d0-404b4c117ee3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntss9"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.923807 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5faad20b-1dd5-40df-8b0a-02890b547838-ca-trust-extracted\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.923856 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4cf28713-ccae-4b06-bdb6-f52d0426ac47-service-ca-bundle\") pod \"authentication-operator-69f744f599-76gwh\" (UID: \"4cf28713-ccae-4b06-bdb6-f52d0426ac47\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-76gwh"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.923886 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbnqg\" (UniqueName: \"kubernetes.io/projected/76160710-34d0-4e08-8e47-9c61a730db60-kube-api-access-dbnqg\") pod \"openshift-controller-manager-operator-756b6f6bc6-bgs76\" (UID: \"76160710-34d0-4e08-8e47-9c61a730db60\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgs76"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.924739 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xs2bl\" (UniqueName: \"kubernetes.io/projected/d31aac44-f947-4eae-811c-9c0822a157d0-kube-api-access-xs2bl\") pod \"marketplace-operator-79b997595-gm5pk\" (UID: \"d31aac44-f947-4eae-811c-9c0822a157d0\") " pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.925658 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdkrl\" (UniqueName: \"kubernetes.io/projected/8172038a-a448-40d0-834a-6f059d3f7738-kube-api-access-bdkrl\") pod \"multus-admission-controller-857f4d67dd-v6xqm\" (UID: \"8172038a-a448-40d0-834a-6f059d3f7738\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v6xqm"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.925709 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/996f4a60-505f-425c-ac79-c8df8033ed57-tmpfs\") pod \"packageserver-d55dfcdfc-zzgvx\" (UID: \"996f4a60-505f-425c-ac79-c8df8033ed57\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.925745 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7566f9bb-44c5-4c74-b1d9-d8c1e3c61206-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-zdldw\" (UID: \"7566f9bb-44c5-4c74-b1d9-d8c1e3c61206\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zdldw"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.925812 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/9aee268c-0a3a-4f5b-8449-c71d027e9d97-default-certificate\") pod \"router-default-5444994796-p2nkd\" (UID: \"9aee268c-0a3a-4f5b-8449-c71d027e9d97\") " pod="openshift-ingress/router-default-5444994796-p2nkd"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.927431 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b968a427-bae8-41af-a3b7-1ef108cefb0d-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-ztsnq\" (UID: \"b968a427-bae8-41af-a3b7-1ef108cefb0d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ztsnq"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.928142 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9sttv\" (UniqueName: \"kubernetes.io/projected/996f4a60-505f-425c-ac79-c8df8033ed57-kube-api-access-9sttv\") pod \"packageserver-d55dfcdfc-zzgvx\" (UID: \"996f4a60-505f-425c-ac79-c8df8033ed57\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.928381 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9c3bce2f-351a-4a20-95d0-404b4c117ee3-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-ntss9\" (UID: \"9c3bce2f-351a-4a20-95d0-404b4c117ee3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntss9"
Nov 25 09:05:56 crc kubenswrapper[4687]: I1125 09:05:56.963667 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-c27v5"]
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:56.997203 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-vftf5"
Nov 25 09:05:57 crc kubenswrapper[4687]: W1125 09:05:57.018226 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf9bd19fb_a226_460a_8164_5538673a3783.slice/crio-f98ecd5ec766628e116e65c35cd0085b380e8f5566cf12ca06e2471d77a03f55 WatchSource:0}: Error finding container f98ecd5ec766628e116e65c35cd0085b380e8f5566cf12ca06e2471d77a03f55: Status 404 returned error can't find the container with id f98ecd5ec766628e116e65c35cd0085b380e8f5566cf12ca06e2471d77a03f55
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.029649 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.029873 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e34299cb-9872-41f2-b75b-5cfc1d309a56-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-qgkfv\" (UID: \"e34299cb-9872-41f2-b75b-5cfc1d309a56\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgkfv"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.029893 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4cf28713-ccae-4b06-bdb6-f52d0426ac47-config\") pod \"authentication-operator-69f744f599-76gwh\" (UID: \"4cf28713-ccae-4b06-bdb6-f52d0426ac47\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-76gwh"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.029922 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3e26524b-c8ef-47d0-8a03-ee87cb3e06fe-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5jqcw\" (UID: \"3e26524b-c8ef-47d0-8a03-ee87cb3e06fe\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jqcw"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.029943 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/3f949ab7-cad8-4a34-b419-42a3dd61a4fc-profile-collector-cert\") pod \"olm-operator-6b444d44fb-vtn5b\" (UID: \"3f949ab7-cad8-4a34-b419-42a3dd61a4fc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vtn5b"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.029964 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6-mountpoint-dir\") pod \"csi-hostpathplugin-cx6sn\" (UID: \"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6\") " pod="hostpath-provisioner/csi-hostpathplugin-cx6sn"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.029981 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djv25\" (UniqueName: \"kubernetes.io/projected/6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6-kube-api-access-djv25\") pod \"csi-hostpathplugin-cx6sn\" (UID: \"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6\") " pod="hostpath-provisioner/csi-hostpathplugin-cx6sn"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.030021 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1bbe405-c2f4-40b2-9569-486ba688d2d2-config\") pod \"service-ca-operator-777779d784-8x7wj\" (UID: \"b1bbe405-c2f4-40b2-9569-486ba688d2d2\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8x7wj"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.030037 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5faad20b-1dd5-40df-8b0a-02890b547838-bound-sa-token\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.030071 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cca1b796-de4d-40d2-ab8b-de2fa132f859-bound-sa-token\") pod \"ingress-operator-5b745b69d9-mclwx\" (UID: \"cca1b796-de4d-40d2-ab8b-de2fa132f859\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mclwx"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.030090 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3c01bb95-1d19-435a-9090-da58d2110922-config-volume\") pod \"collect-profiles-29401020-kvfxw\" (UID: \"3c01bb95-1d19-435a-9090-da58d2110922\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw"
Nov 25 09:05:57 crc kubenswrapper[4687]: E1125 09:05:57.030219 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:05:57.53019901 +0000 UTC m=+152.583838728 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.031563 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1bbe405-c2f4-40b2-9569-486ba688d2d2-config\") pod \"service-ca-operator-777779d784-8x7wj\" (UID: \"b1bbe405-c2f4-40b2-9569-486ba688d2d2\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8x7wj"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.031789 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4cf28713-ccae-4b06-bdb6-f52d0426ac47-config\") pod \"authentication-operator-69f744f599-76gwh\" (UID: \"4cf28713-ccae-4b06-bdb6-f52d0426ac47\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-76gwh"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.030123 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5faad20b-1dd5-40df-8b0a-02890b547838-trusted-ca\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.031859 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ct98d\" (UniqueName: \"kubernetes.io/projected/5faad20b-1dd5-40df-8b0a-02890b547838-kube-api-access-ct98d\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.031879 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cca1b796-de4d-40d2-ab8b-de2fa132f859-trusted-ca\") pod \"ingress-operator-5b745b69d9-mclwx\" (UID: \"cca1b796-de4d-40d2-ab8b-de2fa132f859\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mclwx"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.031894 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzxzp\" (UniqueName: \"kubernetes.io/projected/74dce578-27e9-4dc2-ac45-9019de15d559-kube-api-access-kzxzp\") pod \"control-plane-machine-set-operator-78cbb6b69f-j4vxg\" (UID: \"74dce578-27e9-4dc2-ac45-9019de15d559\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-j4vxg"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.031928 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/996f4a60-505f-425c-ac79-c8df8033ed57-webhook-cert\") pod \"packageserver-d55dfcdfc-zzgvx\" (UID: \"996f4a60-505f-425c-ac79-c8df8033ed57\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.031942 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e34299cb-9872-41f2-b75b-5cfc1d309a56-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-qgkfv\" (UID: \"e34299cb-9872-41f2-b75b-5cfc1d309a56\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgkfv"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.031962 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.031983 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m922l\" (UniqueName: \"kubernetes.io/projected/4cf28713-ccae-4b06-bdb6-f52d0426ac47-kube-api-access-m922l\") pod \"authentication-operator-69f744f599-76gwh\" (UID: \"4cf28713-ccae-4b06-bdb6-f52d0426ac47\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-76gwh"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.031999 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7566f9bb-44c5-4c74-b1d9-d8c1e3c61206-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-zdldw\" (UID: \"7566f9bb-44c5-4c74-b1d9-d8c1e3c61206\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zdldw"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.032014 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d31aac44-f947-4eae-811c-9c0822a157d0-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-gm5pk\" (UID: \"d31aac44-f947-4eae-811c-9c0822a157d0\") " pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.032031 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/9aee268c-0a3a-4f5b-8449-c71d027e9d97-stats-auth\") pod \"router-default-5444994796-p2nkd\" (UID: \"9aee268c-0a3a-4f5b-8449-c71d027e9d97\") " pod="openshift-ingress/router-default-5444994796-p2nkd"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.032046 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cw8v2\" (UniqueName: \"kubernetes.io/projected/b1bbe405-c2f4-40b2-9569-486ba688d2d2-kube-api-access-cw8v2\") pod \"service-ca-operator-777779d784-8x7wj\" (UID: \"b1bbe405-c2f4-40b2-9569-486ba688d2d2\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8x7wj"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.032061 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fhx2\" (UniqueName: \"kubernetes.io/projected/24cfae3a-2b46-4e2e-93ec-7e7dc089af87-kube-api-access-7fhx2\") pod \"kube-storage-version-migrator-operator-b67b599dd-zcrgz\" (UID: \"24cfae3a-2b46-4e2e-93ec-7e7dc089af87\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zcrgz"
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zcrgz" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.032079 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qw9j6\" (UniqueName: \"kubernetes.io/projected/b968a427-bae8-41af-a3b7-1ef108cefb0d-kube-api-access-qw9j6\") pod \"package-server-manager-789f6589d5-ztsnq\" (UID: \"b968a427-bae8-41af-a3b7-1ef108cefb0d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ztsnq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.032101 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/917c4fd5-c131-4c47-b6df-b245776c017a-metrics-tls\") pod \"dns-default-cpbmq\" (UID: \"917c4fd5-c131-4c47-b6df-b245776c017a\") " pod="openshift-dns/dns-default-cpbmq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.032143 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/76160710-34d0-4e08-8e47-9c61a730db60-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-bgs76\" (UID: \"76160710-34d0-4e08-8e47-9c61a730db60\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgs76" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.034013 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/996f4a60-505f-425c-ac79-c8df8033ed57-apiservice-cert\") pod \"packageserver-d55dfcdfc-zzgvx\" (UID: \"996f4a60-505f-425c-ac79-c8df8033ed57\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.034061 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28v78\" (UniqueName: \"kubernetes.io/projected/6dfd611b-2429-43f4-8ccb-9a5b2138a4df-kube-api-access-28v78\") pod \"catalog-operator-68c6474976-5tvvm\" (UID: \"6dfd611b-2429-43f4-8ccb-9a5b2138a4df\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5tvvm" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.034824 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3c01bb95-1d19-435a-9090-da58d2110922-config-volume\") pod \"collect-profiles-29401020-kvfxw\" (UID: \"3c01bb95-1d19-435a-9090-da58d2110922\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.035012 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7566f9bb-44c5-4c74-b1d9-d8c1e3c61206-config\") pod \"kube-controller-manager-operator-78b949d7b-zdldw\" (UID: \"7566f9bb-44c5-4c74-b1d9-d8c1e3c61206\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zdldw" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.035090 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cca1b796-de4d-40d2-ab8b-de2fa132f859-trusted-ca\") pod \"ingress-operator-5b745b69d9-mclwx\" (UID: \"cca1b796-de4d-40d2-ab8b-de2fa132f859\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mclwx" Nov 25 09:05:57 crc 
kubenswrapper[4687]: E1125 09:05:57.035214 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:05:57.535200752 +0000 UTC m=+152.588840470 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.035558 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76160710-34d0-4e08-8e47-9c61a730db60-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-bgs76\" (UID: \"76160710-34d0-4e08-8e47-9c61a730db60\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgs76" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.035630 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9aee268c-0a3a-4f5b-8449-c71d027e9d97-metrics-certs\") pod \"router-default-5444994796-p2nkd\" (UID: \"9aee268c-0a3a-4f5b-8449-c71d027e9d97\") " pod="openshift-ingress/router-default-5444994796-p2nkd" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.035777 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6-registration-dir\") pod \"csi-hostpathplugin-cx6sn\" (UID: \"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6\") " pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.035815 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d31aac44-f947-4eae-811c-9c0822a157d0-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-gm5pk\" (UID: \"d31aac44-f947-4eae-811c-9c0822a157d0\") " pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.035862 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqk66\" (UniqueName: \"kubernetes.io/projected/13f09c5e-2d79-4ff8-8f26-4bcee751eed6-kube-api-access-gqk66\") pod \"machine-config-server-jc6zc\" (UID: \"13f09c5e-2d79-4ff8-8f26-4bcee751eed6\") " pod="openshift-machine-config-operator/machine-config-server-jc6zc" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.037533 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5faad20b-1dd5-40df-8b0a-02890b547838-trusted-ca\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.037663 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/5faad20b-1dd5-40df-8b0a-02890b547838-registry-certificates\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.038477 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bskt8"] Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.039008 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7566f9bb-44c5-4c74-b1d9-d8c1e3c61206-config\") pod \"kube-controller-manager-operator-78b949d7b-zdldw\" (UID: \"7566f9bb-44c5-4c74-b1d9-d8c1e3c61206\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zdldw" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.039320 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3c01bb95-1d19-435a-9090-da58d2110922-secret-volume\") pod \"collect-profiles-29401020-kvfxw\" (UID: \"3c01bb95-1d19-435a-9090-da58d2110922\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.039373 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4cf28713-ccae-4b06-bdb6-f52d0426ac47-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-76gwh\" (UID: \"4cf28713-ccae-4b06-bdb6-f52d0426ac47\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-76gwh" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.039841 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5faad20b-1dd5-40df-8b0a-02890b547838-registry-certificates\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.041438 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76160710-34d0-4e08-8e47-9c61a730db60-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-bgs76\" (UID: \"76160710-34d0-4e08-8e47-9c61a730db60\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgs76" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.043651 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qfd7\" (UniqueName: \"kubernetes.io/projected/5de513b2-cac7-4fe5-b121-91bdb67abbd2-kube-api-access-2qfd7\") pod \"migrator-59844c95c7-kvhfg\" (UID: \"5de513b2-cac7-4fe5-b121-91bdb67abbd2\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-kvhfg" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.043745 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jsn5t\" (UniqueName: \"kubernetes.io/projected/cca1b796-de4d-40d2-ab8b-de2fa132f859-kube-api-access-jsn5t\") pod \"ingress-operator-5b745b69d9-mclwx\" (UID: \"cca1b796-de4d-40d2-ab8b-de2fa132f859\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mclwx" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.044144 4687 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62plw\" (UniqueName: \"kubernetes.io/projected/917c4fd5-c131-4c47-b6df-b245776c017a-kube-api-access-62plw\") pod \"dns-default-cpbmq\" (UID: \"917c4fd5-c131-4c47-b6df-b245776c017a\") " pod="openshift-dns/dns-default-cpbmq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.044217 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/74dce578-27e9-4dc2-ac45-9019de15d559-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-j4vxg\" (UID: \"74dce578-27e9-4dc2-ac45-9019de15d559\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-j4vxg" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.044247 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smvsb\" (UniqueName: \"kubernetes.io/projected/3c01bb95-1d19-435a-9090-da58d2110922-kube-api-access-smvsb\") pod \"collect-profiles-29401020-kvfxw\" (UID: \"3c01bb95-1d19-435a-9090-da58d2110922\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.044302 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9c3bce2f-351a-4a20-95d0-404b4c117ee3-proxy-tls\") pod \"machine-config-controller-84d6567774-ntss9\" (UID: \"9c3bce2f-351a-4a20-95d0-404b4c117ee3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntss9" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.044331 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/917c4fd5-c131-4c47-b6df-b245776c017a-config-volume\") pod \"dns-default-cpbmq\" (UID: \"917c4fd5-c131-4c47-b6df-b245776c017a\") " pod="openshift-dns/dns-default-cpbmq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.044353 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5faad20b-1dd5-40df-8b0a-02890b547838-ca-trust-extracted\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.044404 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4cf28713-ccae-4b06-bdb6-f52d0426ac47-service-ca-bundle\") pod \"authentication-operator-69f744f599-76gwh\" (UID: \"4cf28713-ccae-4b06-bdb6-f52d0426ac47\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-76gwh" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.044430 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbnqg\" (UniqueName: \"kubernetes.io/projected/76160710-34d0-4e08-8e47-9c61a730db60-kube-api-access-dbnqg\") pod \"openshift-controller-manager-operator-756b6f6bc6-bgs76\" (UID: \"76160710-34d0-4e08-8e47-9c61a730db60\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgs76" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.044496 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-xs2bl\" (UniqueName: \"kubernetes.io/projected/d31aac44-f947-4eae-811c-9c0822a157d0-kube-api-access-xs2bl\") pod \"marketplace-operator-79b997595-gm5pk\" (UID: \"d31aac44-f947-4eae-811c-9c0822a157d0\") " pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045308 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/44b4be83-3df2-46e2-bf91-9afa9992ea95-cert\") pod \"ingress-canary-tw4q9\" (UID: \"44b4be83-3df2-46e2-bf91-9afa9992ea95\") " pod="openshift-ingress-canary/ingress-canary-tw4q9" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045357 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7566f9bb-44c5-4c74-b1d9-d8c1e3c61206-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-zdldw\" (UID: \"7566f9bb-44c5-4c74-b1d9-d8c1e3c61206\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zdldw" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045391 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdkrl\" (UniqueName: \"kubernetes.io/projected/8172038a-a448-40d0-834a-6f059d3f7738-kube-api-access-bdkrl\") pod \"multus-admission-controller-857f4d67dd-v6xqm\" (UID: \"8172038a-a448-40d0-834a-6f059d3f7738\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v6xqm" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045410 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/996f4a60-505f-425c-ac79-c8df8033ed57-tmpfs\") pod \"packageserver-d55dfcdfc-zzgvx\" (UID: \"996f4a60-505f-425c-ac79-c8df8033ed57\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045436 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/9aee268c-0a3a-4f5b-8449-c71d027e9d97-default-certificate\") pod \"router-default-5444994796-p2nkd\" (UID: \"9aee268c-0a3a-4f5b-8449-c71d027e9d97\") " pod="openshift-ingress/router-default-5444994796-p2nkd" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045479 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b968a427-bae8-41af-a3b7-1ef108cefb0d-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-ztsnq\" (UID: \"b968a427-bae8-41af-a3b7-1ef108cefb0d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ztsnq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045514 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9sttv\" (UniqueName: \"kubernetes.io/projected/996f4a60-505f-425c-ac79-c8df8033ed57-kube-api-access-9sttv\") pod \"packageserver-d55dfcdfc-zzgvx\" (UID: \"996f4a60-505f-425c-ac79-c8df8033ed57\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045539 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6-socket-dir\") 
pod \"csi-hostpathplugin-cx6sn\" (UID: \"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6\") " pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045562 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6-csi-data-dir\") pod \"csi-hostpathplugin-cx6sn\" (UID: \"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6\") " pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045588 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9c3bce2f-351a-4a20-95d0-404b4c117ee3-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-ntss9\" (UID: \"9c3bce2f-351a-4a20-95d0-404b4c117ee3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntss9" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045610 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/6dfd611b-2429-43f4-8ccb-9a5b2138a4df-profile-collector-cert\") pod \"catalog-operator-68c6474976-5tvvm\" (UID: \"6dfd611b-2429-43f4-8ccb-9a5b2138a4df\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5tvvm" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045631 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6-plugins-dir\") pod \"csi-hostpathplugin-cx6sn\" (UID: \"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6\") " pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045654 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/6dfd611b-2429-43f4-8ccb-9a5b2138a4df-srv-cert\") pod \"catalog-operator-68c6474976-5tvvm\" (UID: \"6dfd611b-2429-43f4-8ccb-9a5b2138a4df\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5tvvm" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045754 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e26524b-c8ef-47d0-8a03-ee87cb3e06fe-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5jqcw\" (UID: \"3e26524b-c8ef-47d0-8a03-ee87cb3e06fe\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jqcw" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045780 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5faad20b-1dd5-40df-8b0a-02890b547838-registry-tls\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045802 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nz62p\" (UniqueName: \"kubernetes.io/projected/3f949ab7-cad8-4a34-b419-42a3dd61a4fc-kube-api-access-nz62p\") pod \"olm-operator-6b444d44fb-vtn5b\" (UID: \"3f949ab7-cad8-4a34-b419-42a3dd61a4fc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vtn5b" Nov 25 
09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045824 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/24cfae3a-2b46-4e2e-93ec-7e7dc089af87-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-zcrgz\" (UID: \"24cfae3a-2b46-4e2e-93ec-7e7dc089af87\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zcrgz" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045843 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vksl\" (UniqueName: \"kubernetes.io/projected/9aee268c-0a3a-4f5b-8449-c71d027e9d97-kube-api-access-8vksl\") pod \"router-default-5444994796-p2nkd\" (UID: \"9aee268c-0a3a-4f5b-8449-c71d027e9d97\") " pod="openshift-ingress/router-default-5444994796-p2nkd" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045875 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3f949ab7-cad8-4a34-b419-42a3dd61a4fc-srv-cert\") pod \"olm-operator-6b444d44fb-vtn5b\" (UID: \"3f949ab7-cad8-4a34-b419-42a3dd61a4fc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vtn5b" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045909 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8172038a-a448-40d0-834a-6f059d3f7738-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-v6xqm\" (UID: \"8172038a-a448-40d0-834a-6f059d3f7738\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v6xqm" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045931 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e34299cb-9872-41f2-b75b-5cfc1d309a56-config\") pod \"kube-apiserver-operator-766d6c64bb-qgkfv\" (UID: \"e34299cb-9872-41f2-b75b-5cfc1d309a56\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgkfv" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045952 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/13f09c5e-2d79-4ff8-8f26-4bcee751eed6-certs\") pod \"machine-config-server-jc6zc\" (UID: \"13f09c5e-2d79-4ff8-8f26-4bcee751eed6\") " pod="openshift-machine-config-operator/machine-config-server-jc6zc" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.045972 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmpnr\" (UniqueName: \"kubernetes.io/projected/9c3bce2f-351a-4a20-95d0-404b4c117ee3-kube-api-access-lmpnr\") pod \"machine-config-controller-84d6567774-ntss9\" (UID: \"9c3bce2f-351a-4a20-95d0-404b4c117ee3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntss9" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.046005 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4cf28713-ccae-4b06-bdb6-f52d0426ac47-serving-cert\") pod \"authentication-operator-69f744f599-76gwh\" (UID: \"4cf28713-ccae-4b06-bdb6-f52d0426ac47\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-76gwh" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.046026 4687 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24cfae3a-2b46-4e2e-93ec-7e7dc089af87-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-zcrgz\" (UID: \"24cfae3a-2b46-4e2e-93ec-7e7dc089af87\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zcrgz" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.046047 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9aee268c-0a3a-4f5b-8449-c71d027e9d97-service-ca-bundle\") pod \"router-default-5444994796-p2nkd\" (UID: \"9aee268c-0a3a-4f5b-8449-c71d027e9d97\") " pod="openshift-ingress/router-default-5444994796-p2nkd" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.046075 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/cca1b796-de4d-40d2-ab8b-de2fa132f859-metrics-tls\") pod \"ingress-operator-5b745b69d9-mclwx\" (UID: \"cca1b796-de4d-40d2-ab8b-de2fa132f859\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mclwx" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.046091 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b1bbe405-c2f4-40b2-9569-486ba688d2d2-serving-cert\") pod \"service-ca-operator-777779d784-8x7wj\" (UID: \"b1bbe405-c2f4-40b2-9569-486ba688d2d2\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8x7wj" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.046112 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5faad20b-1dd5-40df-8b0a-02890b547838-installation-pull-secrets\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.046134 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtnww\" (UniqueName: \"kubernetes.io/projected/44b4be83-3df2-46e2-bf91-9afa9992ea95-kube-api-access-wtnww\") pod \"ingress-canary-tw4q9\" (UID: \"44b4be83-3df2-46e2-bf91-9afa9992ea95\") " pod="openshift-ingress-canary/ingress-canary-tw4q9" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.046149 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/13f09c5e-2d79-4ff8-8f26-4bcee751eed6-node-bootstrap-token\") pod \"machine-config-server-jc6zc\" (UID: \"13f09c5e-2d79-4ff8-8f26-4bcee751eed6\") " pod="openshift-machine-config-operator/machine-config-server-jc6zc" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.046182 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e26524b-c8ef-47d0-8a03-ee87cb3e06fe-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5jqcw\" (UID: \"3e26524b-c8ef-47d0-8a03-ee87cb3e06fe\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jqcw" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.046302 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: 
\"kubernetes.io/empty-dir/5faad20b-1dd5-40df-8b0a-02890b547838-ca-trust-extracted\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.047420 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d31aac44-f947-4eae-811c-9c0822a157d0-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-gm5pk\" (UID: \"d31aac44-f947-4eae-811c-9c0822a157d0\") " pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.048347 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/996f4a60-505f-425c-ac79-c8df8033ed57-tmpfs\") pod \"packageserver-d55dfcdfc-zzgvx\" (UID: \"996f4a60-505f-425c-ac79-c8df8033ed57\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.051688 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e26524b-c8ef-47d0-8a03-ee87cb3e06fe-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5jqcw\" (UID: \"3e26524b-c8ef-47d0-8a03-ee87cb3e06fe\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jqcw" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.051860 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9aee268c-0a3a-4f5b-8449-c71d027e9d97-service-ca-bundle\") pod \"router-default-5444994796-p2nkd\" (UID: \"9aee268c-0a3a-4f5b-8449-c71d027e9d97\") " pod="openshift-ingress/router-default-5444994796-p2nkd" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.052099 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4cf28713-ccae-4b06-bdb6-f52d0426ac47-service-ca-bundle\") pod \"authentication-operator-69f744f599-76gwh\" (UID: \"4cf28713-ccae-4b06-bdb6-f52d0426ac47\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-76gwh" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.052227 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e34299cb-9872-41f2-b75b-5cfc1d309a56-config\") pod \"kube-apiserver-operator-766d6c64bb-qgkfv\" (UID: \"e34299cb-9872-41f2-b75b-5cfc1d309a56\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgkfv" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.052281 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/9aee268c-0a3a-4f5b-8449-c71d027e9d97-stats-auth\") pod \"router-default-5444994796-p2nkd\" (UID: \"9aee268c-0a3a-4f5b-8449-c71d027e9d97\") " pod="openshift-ingress/router-default-5444994796-p2nkd" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.055682 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5faad20b-1dd5-40df-8b0a-02890b547838-installation-pull-secrets\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 
09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.055850 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-zgglt"] Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.055893 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9c3bce2f-351a-4a20-95d0-404b4c117ee3-proxy-tls\") pod \"machine-config-controller-84d6567774-ntss9\" (UID: \"9c3bce2f-351a-4a20-95d0-404b4c117ee3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntss9" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.056161 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7566f9bb-44c5-4c74-b1d9-d8c1e3c61206-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-zdldw\" (UID: \"7566f9bb-44c5-4c74-b1d9-d8c1e3c61206\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zdldw" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.056478 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/76160710-34d0-4e08-8e47-9c61a730db60-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-bgs76\" (UID: \"76160710-34d0-4e08-8e47-9c61a730db60\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgs76" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.056494 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/996f4a60-505f-425c-ac79-c8df8033ed57-apiservice-cert\") pod \"packageserver-d55dfcdfc-zzgvx\" (UID: \"996f4a60-505f-425c-ac79-c8df8033ed57\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.056692 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9aee268c-0a3a-4f5b-8449-c71d027e9d97-metrics-certs\") pod \"router-default-5444994796-p2nkd\" (UID: \"9aee268c-0a3a-4f5b-8449-c71d027e9d97\") " pod="openshift-ingress/router-default-5444994796-p2nkd" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.056992 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24cfae3a-2b46-4e2e-93ec-7e7dc089af87-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-zcrgz\" (UID: \"24cfae3a-2b46-4e2e-93ec-7e7dc089af87\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zcrgz" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.057001 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3c01bb95-1d19-435a-9090-da58d2110922-secret-volume\") pod \"collect-profiles-29401020-kvfxw\" (UID: \"3c01bb95-1d19-435a-9090-da58d2110922\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.057149 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/9aee268c-0a3a-4f5b-8449-c71d027e9d97-default-certificate\") pod \"router-default-5444994796-p2nkd\" (UID: \"9aee268c-0a3a-4f5b-8449-c71d027e9d97\") " pod="openshift-ingress/router-default-5444994796-p2nkd" Nov 25 
09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.057233 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4cf28713-ccae-4b06-bdb6-f52d0426ac47-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-76gwh\" (UID: \"4cf28713-ccae-4b06-bdb6-f52d0426ac47\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-76gwh" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.057229 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/3f949ab7-cad8-4a34-b419-42a3dd61a4fc-profile-collector-cert\") pod \"olm-operator-6b444d44fb-vtn5b\" (UID: \"3f949ab7-cad8-4a34-b419-42a3dd61a4fc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vtn5b" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.057373 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e34299cb-9872-41f2-b75b-5cfc1d309a56-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-qgkfv\" (UID: \"e34299cb-9872-41f2-b75b-5cfc1d309a56\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgkfv" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.058301 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/996f4a60-505f-425c-ac79-c8df8033ed57-webhook-cert\") pod \"packageserver-d55dfcdfc-zzgvx\" (UID: \"996f4a60-505f-425c-ac79-c8df8033ed57\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.058820 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d31aac44-f947-4eae-811c-9c0822a157d0-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-gm5pk\" (UID: \"d31aac44-f947-4eae-811c-9c0822a157d0\") " pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.063121 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9c3bce2f-351a-4a20-95d0-404b4c117ee3-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-ntss9\" (UID: \"9c3bce2f-351a-4a20-95d0-404b4c117ee3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntss9" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.063489 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b968a427-bae8-41af-a3b7-1ef108cefb0d-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-ztsnq\" (UID: \"b968a427-bae8-41af-a3b7-1ef108cefb0d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ztsnq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.063830 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e26524b-c8ef-47d0-8a03-ee87cb3e06fe-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5jqcw\" (UID: \"3e26524b-c8ef-47d0-8a03-ee87cb3e06fe\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jqcw" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.063944 4687 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3f949ab7-cad8-4a34-b419-42a3dd61a4fc-srv-cert\") pod \"olm-operator-6b444d44fb-vtn5b\" (UID: \"3f949ab7-cad8-4a34-b419-42a3dd61a4fc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vtn5b" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.065163 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/6dfd611b-2429-43f4-8ccb-9a5b2138a4df-profile-collector-cert\") pod \"catalog-operator-68c6474976-5tvvm\" (UID: \"6dfd611b-2429-43f4-8ccb-9a5b2138a4df\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5tvvm" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.066321 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5faad20b-1dd5-40df-8b0a-02890b547838-registry-tls\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.066930 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/24cfae3a-2b46-4e2e-93ec-7e7dc089af87-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-zcrgz\" (UID: \"24cfae3a-2b46-4e2e-93ec-7e7dc089af87\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zcrgz" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.074572 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-v9zjk"] Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.076446 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8172038a-a448-40d0-834a-6f059d3f7738-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-v6xqm\" (UID: \"8172038a-a448-40d0-834a-6f059d3f7738\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v6xqm" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.081345 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/6dfd611b-2429-43f4-8ccb-9a5b2138a4df-srv-cert\") pod \"catalog-operator-68c6474976-5tvvm\" (UID: \"6dfd611b-2429-43f4-8ccb-9a5b2138a4df\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5tvvm" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.084517 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4cf28713-ccae-4b06-bdb6-f52d0426ac47-serving-cert\") pod \"authentication-operator-69f744f599-76gwh\" (UID: \"4cf28713-ccae-4b06-bdb6-f52d0426ac47\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-76gwh" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.084859 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/74dce578-27e9-4dc2-ac45-9019de15d559-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-j4vxg\" (UID: \"74dce578-27e9-4dc2-ac45-9019de15d559\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-j4vxg" Nov 25 09:05:57 crc 
kubenswrapper[4687]: I1125 09:05:57.085823 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/cca1b796-de4d-40d2-ab8b-de2fa132f859-metrics-tls\") pod \"ingress-operator-5b745b69d9-mclwx\" (UID: \"cca1b796-de4d-40d2-ab8b-de2fa132f859\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mclwx" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.086435 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3e26524b-c8ef-47d0-8a03-ee87cb3e06fe-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5jqcw\" (UID: \"3e26524b-c8ef-47d0-8a03-ee87cb3e06fe\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jqcw" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.094208 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b1bbe405-c2f4-40b2-9569-486ba688d2d2-serving-cert\") pod \"service-ca-operator-777779d784-8x7wj\" (UID: \"b1bbe405-c2f4-40b2-9569-486ba688d2d2\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8x7wj" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.106989 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5faad20b-1dd5-40df-8b0a-02890b547838-bound-sa-token\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.123477 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cw8v2\" (UniqueName: \"kubernetes.io/projected/b1bbe405-c2f4-40b2-9569-486ba688d2d2-kube-api-access-cw8v2\") pod \"service-ca-operator-777779d784-8x7wj\" (UID: \"b1bbe405-c2f4-40b2-9569-486ba688d2d2\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8x7wj" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.147026 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.147225 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djv25\" (UniqueName: \"kubernetes.io/projected/6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6-kube-api-access-djv25\") pod \"csi-hostpathplugin-cx6sn\" (UID: \"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6\") " pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.148227 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/917c4fd5-c131-4c47-b6df-b245776c017a-metrics-tls\") pod \"dns-default-cpbmq\" (UID: \"917c4fd5-c131-4c47-b6df-b245776c017a\") " pod="openshift-dns/dns-default-cpbmq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.148258 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6-registration-dir\") pod \"csi-hostpathplugin-cx6sn\" (UID: 
\"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6\") " pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.148274 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqk66\" (UniqueName: \"kubernetes.io/projected/13f09c5e-2d79-4ff8-8f26-4bcee751eed6-kube-api-access-gqk66\") pod \"machine-config-server-jc6zc\" (UID: \"13f09c5e-2d79-4ff8-8f26-4bcee751eed6\") " pod="openshift-machine-config-operator/machine-config-server-jc6zc" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.148308 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62plw\" (UniqueName: \"kubernetes.io/projected/917c4fd5-c131-4c47-b6df-b245776c017a-kube-api-access-62plw\") pod \"dns-default-cpbmq\" (UID: \"917c4fd5-c131-4c47-b6df-b245776c017a\") " pod="openshift-dns/dns-default-cpbmq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.148332 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/917c4fd5-c131-4c47-b6df-b245776c017a-config-volume\") pod \"dns-default-cpbmq\" (UID: \"917c4fd5-c131-4c47-b6df-b245776c017a\") " pod="openshift-dns/dns-default-cpbmq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.148359 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/44b4be83-3df2-46e2-bf91-9afa9992ea95-cert\") pod \"ingress-canary-tw4q9\" (UID: \"44b4be83-3df2-46e2-bf91-9afa9992ea95\") " pod="openshift-ingress-canary/ingress-canary-tw4q9" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.148394 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6-socket-dir\") pod \"csi-hostpathplugin-cx6sn\" (UID: \"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6\") " pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.148412 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6-csi-data-dir\") pod \"csi-hostpathplugin-cx6sn\" (UID: \"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6\") " pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.148436 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6-plugins-dir\") pod \"csi-hostpathplugin-cx6sn\" (UID: \"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6\") " pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.148471 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/13f09c5e-2d79-4ff8-8f26-4bcee751eed6-certs\") pod \"machine-config-server-jc6zc\" (UID: \"13f09c5e-2d79-4ff8-8f26-4bcee751eed6\") " pod="openshift-machine-config-operator/machine-config-server-jc6zc" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.148522 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtnww\" (UniqueName: \"kubernetes.io/projected/44b4be83-3df2-46e2-bf91-9afa9992ea95-kube-api-access-wtnww\") pod \"ingress-canary-tw4q9\" (UID: \"44b4be83-3df2-46e2-bf91-9afa9992ea95\") " 
pod="openshift-ingress-canary/ingress-canary-tw4q9" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.148541 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/13f09c5e-2d79-4ff8-8f26-4bcee751eed6-node-bootstrap-token\") pod \"machine-config-server-jc6zc\" (UID: \"13f09c5e-2d79-4ff8-8f26-4bcee751eed6\") " pod="openshift-machine-config-operator/machine-config-server-jc6zc" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.148566 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6-mountpoint-dir\") pod \"csi-hostpathplugin-cx6sn\" (UID: \"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6\") " pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.148661 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6-mountpoint-dir\") pod \"csi-hostpathplugin-cx6sn\" (UID: \"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6\") " pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" Nov 25 09:05:57 crc kubenswrapper[4687]: E1125 09:05:57.148731 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:05:57.648715496 +0000 UTC m=+152.702355214 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.149187 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6-socket-dir\") pod \"csi-hostpathplugin-cx6sn\" (UID: \"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6\") " pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.149249 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6-registration-dir\") pod \"csi-hostpathplugin-cx6sn\" (UID: \"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6\") " pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.149984 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6-csi-data-dir\") pod \"csi-hostpathplugin-cx6sn\" (UID: \"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6\") " pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.149999 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6-plugins-dir\") pod \"csi-hostpathplugin-cx6sn\" (UID: 
\"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6\") " pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.152290 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/917c4fd5-c131-4c47-b6df-b245776c017a-config-volume\") pod \"dns-default-cpbmq\" (UID: \"917c4fd5-c131-4c47-b6df-b245776c017a\") " pod="openshift-dns/dns-default-cpbmq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.152610 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/917c4fd5-c131-4c47-b6df-b245776c017a-metrics-tls\") pod \"dns-default-cpbmq\" (UID: \"917c4fd5-c131-4c47-b6df-b245776c017a\") " pod="openshift-dns/dns-default-cpbmq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.153140 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fhx2\" (UniqueName: \"kubernetes.io/projected/24cfae3a-2b46-4e2e-93ec-7e7dc089af87-kube-api-access-7fhx2\") pod \"kube-storage-version-migrator-operator-b67b599dd-zcrgz\" (UID: \"24cfae3a-2b46-4e2e-93ec-7e7dc089af87\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zcrgz" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.157401 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/13f09c5e-2d79-4ff8-8f26-4bcee751eed6-certs\") pod \"machine-config-server-jc6zc\" (UID: \"13f09c5e-2d79-4ff8-8f26-4bcee751eed6\") " pod="openshift-machine-config-operator/machine-config-server-jc6zc" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.158045 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/13f09c5e-2d79-4ff8-8f26-4bcee751eed6-node-bootstrap-token\") pod \"machine-config-server-jc6zc\" (UID: \"13f09c5e-2d79-4ff8-8f26-4bcee751eed6\") " pod="openshift-machine-config-operator/machine-config-server-jc6zc" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.158643 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/44b4be83-3df2-46e2-bf91-9afa9992ea95-cert\") pod \"ingress-canary-tw4q9\" (UID: \"44b4be83-3df2-46e2-bf91-9afa9992ea95\") " pod="openshift-ingress-canary/ingress-canary-tw4q9" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.159973 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ct98d\" (UniqueName: \"kubernetes.io/projected/5faad20b-1dd5-40df-8b0a-02890b547838-kube-api-access-ct98d\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.171742 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jqcw" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.182338 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qw9j6\" (UniqueName: \"kubernetes.io/projected/b968a427-bae8-41af-a3b7-1ef108cefb0d-kube-api-access-qw9j6\") pod \"package-server-manager-789f6589d5-ztsnq\" (UID: \"b968a427-bae8-41af-a3b7-1ef108cefb0d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ztsnq" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.216324 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzxzp\" (UniqueName: \"kubernetes.io/projected/74dce578-27e9-4dc2-ac45-9019de15d559-kube-api-access-kzxzp\") pod \"control-plane-machine-set-operator-78cbb6b69f-j4vxg\" (UID: \"74dce578-27e9-4dc2-ac45-9019de15d559\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-j4vxg" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.222927 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e34299cb-9872-41f2-b75b-5cfc1d309a56-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-qgkfv\" (UID: \"e34299cb-9872-41f2-b75b-5cfc1d309a56\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgkfv" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.238076 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm"] Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.241201 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7566f9bb-44c5-4c74-b1d9-d8c1e3c61206-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-zdldw\" (UID: \"7566f9bb-44c5-4c74-b1d9-d8c1e3c61206\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zdldw" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.247648 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zcrgz" Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.250450 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:05:57 crc kubenswrapper[4687]: E1125 09:05:57.250936 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:05:57.750921011 +0000 UTC m=+152.804560739 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.273643 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m922l\" (UniqueName: \"kubernetes.io/projected/4cf28713-ccae-4b06-bdb6-f52d0426ac47-kube-api-access-m922l\") pod \"authentication-operator-69f744f599-76gwh\" (UID: \"4cf28713-ccae-4b06-bdb6-f52d0426ac47\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-76gwh"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.273960 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ztsnq"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.296112 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cca1b796-de4d-40d2-ab8b-de2fa132f859-bound-sa-token\") pod \"ingress-operator-5b745b69d9-mclwx\" (UID: \"cca1b796-de4d-40d2-ab8b-de2fa132f859\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mclwx"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.327133 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qfd7\" (UniqueName: \"kubernetes.io/projected/5de513b2-cac7-4fe5-b121-91bdb67abbd2-kube-api-access-2qfd7\") pod \"migrator-59844c95c7-kvhfg\" (UID: \"5de513b2-cac7-4fe5-b121-91bdb67abbd2\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-kvhfg"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.342716 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28v78\" (UniqueName: \"kubernetes.io/projected/6dfd611b-2429-43f4-8ccb-9a5b2138a4df-kube-api-access-28v78\") pod \"catalog-operator-68c6474976-5tvvm\" (UID: \"6dfd611b-2429-43f4-8ccb-9a5b2138a4df\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5tvvm"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.344917 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-8x7wj"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.346073 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf"]
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.347785 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jsn5t\" (UniqueName: \"kubernetes.io/projected/cca1b796-de4d-40d2-ab8b-de2fa132f859-kube-api-access-jsn5t\") pod \"ingress-operator-5b745b69d9-mclwx\" (UID: \"cca1b796-de4d-40d2-ab8b-de2fa132f859\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mclwx"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.352087 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:05:57 crc kubenswrapper[4687]: E1125 09:05:57.352435 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:05:57.852420747 +0000 UTC m=+152.906060455 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.366522 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vksl\" (UniqueName: \"kubernetes.io/projected/9aee268c-0a3a-4f5b-8449-c71d027e9d97-kube-api-access-8vksl\") pod \"router-default-5444994796-p2nkd\" (UID: \"9aee268c-0a3a-4f5b-8449-c71d027e9d97\") " pod="openshift-ingress/router-default-5444994796-p2nkd"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.381453 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-6gb62"]
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.384843 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbnqg\" (UniqueName: \"kubernetes.io/projected/76160710-34d0-4e08-8e47-9c61a730db60-kube-api-access-dbnqg\") pod \"openshift-controller-manager-operator-756b6f6bc6-bgs76\" (UID: \"76160710-34d0-4e08-8e47-9c61a730db60\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgs76"
Nov 25 09:05:57 crc kubenswrapper[4687]: W1125 09:05:57.400575 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0a45ab31_45db_4069_8da2_4c53cd2689ca.slice/crio-8e8dded4dbb95ac4bb3f08651683e96a690616c2cc6435bc966e386b7329819b WatchSource:0}: Error finding container 8e8dded4dbb95ac4bb3f08651683e96a690616c2cc6435bc966e386b7329819b: Status 404 returned error can't find the container with id 8e8dded4dbb95ac4bb3f08651683e96a690616c2cc6435bc966e386b7329819b
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.411353 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-76gwh"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.412376 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xs2bl\" (UniqueName: \"kubernetes.io/projected/d31aac44-f947-4eae-811c-9c0822a157d0-kube-api-access-xs2bl\") pod \"marketplace-operator-79b997595-gm5pk\" (UID: \"d31aac44-f947-4eae-811c-9c0822a157d0\") " pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk"
Nov 25 09:05:57 crc kubenswrapper[4687]: W1125 09:05:57.416597 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9f8678ea_08b9_4dcd_b70d_e19800c697e3.slice/crio-56ef48d195e31d14786a9b7a559900b6864a196e1f422a8ac18cf4c0305344b6 WatchSource:0}: Error finding container 56ef48d195e31d14786a9b7a559900b6864a196e1f422a8ac18cf4c0305344b6: Status 404 returned error can't find the container with id 56ef48d195e31d14786a9b7a559900b6864a196e1f422a8ac18cf4c0305344b6
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.425539 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdkrl\" (UniqueName: \"kubernetes.io/projected/8172038a-a448-40d0-834a-6f059d3f7738-kube-api-access-bdkrl\") pod \"multus-admission-controller-857f4d67dd-v6xqm\" (UID: \"8172038a-a448-40d0-834a-6f059d3f7738\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v6xqm"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.439215 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zdldw"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.453789 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-j4vxg"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.455295 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.458482 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9sttv\" (UniqueName: \"kubernetes.io/projected/996f4a60-505f-425c-ac79-c8df8033ed57-kube-api-access-9sttv\") pod \"packageserver-d55dfcdfc-zzgvx\" (UID: \"996f4a60-505f-425c-ac79-c8df8033ed57\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.461524 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smvsb\" (UniqueName: \"kubernetes.io/projected/3c01bb95-1d19-435a-9090-da58d2110922-kube-api-access-smvsb\") pod \"collect-profiles-29401020-kvfxw\" (UID: \"3c01bb95-1d19-435a-9090-da58d2110922\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw"
Nov 25 09:05:57 crc kubenswrapper[4687]: E1125 09:05:57.462789 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:05:57.962771556 +0000 UTC m=+153.016411274 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.478565 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mclwx"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.486209 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgkfv"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.488028 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmpnr\" (UniqueName: \"kubernetes.io/projected/9c3bce2f-351a-4a20-95d0-404b4c117ee3-kube-api-access-lmpnr\") pod \"machine-config-controller-84d6567774-ntss9\" (UID: \"9c3bce2f-351a-4a20-95d0-404b4c117ee3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntss9"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.492982 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-kvhfg"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.502406 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntss9"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.509213 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nz62p\" (UniqueName: \"kubernetes.io/projected/3f949ab7-cad8-4a34-b419-42a3dd61a4fc-kube-api-access-nz62p\") pod \"olm-operator-6b444d44fb-vtn5b\" (UID: \"3f949ab7-cad8-4a34-b419-42a3dd61a4fc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vtn5b"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.512625 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vftf5"]
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.528086 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgs76"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.538129 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-p2nkd"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.556895 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.557487 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:05:57 crc kubenswrapper[4687]: E1125 09:05:57.558612 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:05:58.058586872 +0000 UTC m=+153.112226590 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.558609 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djv25\" (UniqueName: \"kubernetes.io/projected/6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6-kube-api-access-djv25\") pod \"csi-hostpathplugin-cx6sn\" (UID: \"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6\") " pod="hostpath-provisioner/csi-hostpathplugin-cx6sn"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.596062 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-5jnmz"]
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.596111 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-9r7dh"]
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.600694 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqk66\" (UniqueName: \"kubernetes.io/projected/13f09c5e-2d79-4ff8-8f26-4bcee751eed6-kube-api-access-gqk66\") pod \"machine-config-server-jc6zc\" (UID: \"13f09c5e-2d79-4ff8-8f26-4bcee751eed6\") " pod="openshift-machine-config-operator/machine-config-server-jc6zc"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.606433 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq"
Nov 25 09:05:57 crc kubenswrapper[4687]: E1125 09:05:57.606925 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:05:58.1069107 +0000 UTC m=+153.160550408 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.607284 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62plw\" (UniqueName: \"kubernetes.io/projected/917c4fd5-c131-4c47-b6df-b245776c017a-kube-api-access-62plw\") pod \"dns-default-cpbmq\" (UID: \"917c4fd5-c131-4c47-b6df-b245776c017a\") " pod="openshift-dns/dns-default-cpbmq"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.607908 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-v6xqm"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.622933 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5tvvm"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.623321 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-zgglt" event={"ID":"fbe26cf0-9829-4e16-b4c6-24484b1e678a","Type":"ContainerStarted","Data":"d907ee286e1eb1e87a0a2cc250d13a7d423eb6b7c88736d63e8cbd20460c236f"}
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.624806 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vtn5b"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.630106 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.630902 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtnww\" (UniqueName: \"kubernetes.io/projected/44b4be83-3df2-46e2-bf91-9afa9992ea95-kube-api-access-wtnww\") pod \"ingress-canary-tw4q9\" (UID: \"44b4be83-3df2-46e2-bf91-9afa9992ea95\") " pod="openshift-ingress-canary/ingress-canary-tw4q9"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.633479 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-xmxvb" event={"ID":"057a2f29-f877-40e5-9a25-d1a2d26918ad","Type":"ContainerStarted","Data":"1d2e2bd342f5b09a11d3f4d2ea0c0cd4250d87a2ccbb5e71cbedcf67bc9952de"}
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.633710 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-xmxvb" event={"ID":"057a2f29-f877-40e5-9a25-d1a2d26918ad","Type":"ContainerStarted","Data":"0d66ee879d68fa824008935e0b75496c695773d409d91e8be9889682828acd4a"}
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.633791 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-xmxvb"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.649291 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.649792 4687 patch_prober.go:28] interesting pod/downloads-7954f5f757-xmxvb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body=
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.649817 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xmxvb" podUID="057a2f29-f877-40e5-9a25-d1a2d26918ad" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.651953 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-cpbmq"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.652165 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jqcw"]
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.660445 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-tw4q9"
Nov 25 09:05:57 crc kubenswrapper[4687]: W1125 09:05:57.663355 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb307de3a_d363_4077_933e_68f51ae40158.slice/crio-c5b877b268dc6f27868e5dd73d209f7f3b839f072a43625d529f4467c9e58f90 WatchSource:0}: Error finding container c5b877b268dc6f27868e5dd73d209f7f3b839f072a43625d529f4467c9e58f90: Status 404 returned error can't find the container with id c5b877b268dc6f27868e5dd73d209f7f3b839f072a43625d529f4467c9e58f90
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.663695 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6qjt6" event={"ID":"efdd22a2-19ab-40f6-8a0d-fdbfdc3dbff3","Type":"ContainerStarted","Data":"212fd882dd48ceb6a308a99c00fdde64fd7ed2484085a2db7d03147485fd6d81"}
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.663732 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6qjt6" event={"ID":"efdd22a2-19ab-40f6-8a0d-fdbfdc3dbff3","Type":"ContainerStarted","Data":"c146bed9773c453563bbe4b4233012f8b9385c9cfa45be8b42bdefffe3f15551"}
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.665106 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-68pmb"]
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.667677 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-jc6zc"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.676029 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ztsnq"]
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.676301 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-c27v5" event={"ID":"f9bd19fb-a226-460a-8164-5538673a3783","Type":"ContainerStarted","Data":"f98ecd5ec766628e116e65c35cd0085b380e8f5566cf12ca06e2471d77a03f55"}
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.691744 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-cx6sn"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.708316 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:05:57 crc kubenswrapper[4687]: E1125 09:05:57.709561 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:05:58.209524306 +0000 UTC m=+153.263164024 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.721013 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bskt8" event={"ID":"35177658-25cb-4a51-a14e-5fb925283ac8","Type":"ContainerStarted","Data":"0e6c52b09e1cdf1c0ae4326ec00f0a133b455e13883bd212cf23b8131dd36ea7"}
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.749660 4687 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-c7v75 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.26:6443/healthz\": dial tcp 10.217.0.26:6443: connect: connection refused" start-of-body=
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.749715 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" podUID="69faad41-a827-4fd3-b43e-036297dc2c9f" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.26:6443/healthz\": dial tcp 10.217.0.26:6443: connect: connection refused"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.761157 4687 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-2qjbv container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body=
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.761496 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" podUID="e8238e53-faf8-4dc1-a726-76368f0319be" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.766159 4687 generic.go:334] "Generic (PLEG): container finished" podID="31a14c62-0956-4d44-98f4-724da42f4e78" containerID="9e832a5d92172d0bd0349656172e9d9846bca0cd58dea45b7cf92c96c3418b56" exitCode=0
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.783295 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-6gb62" event={"ID":"9f8678ea-08b9-4dcd-b70d-e19800c697e3","Type":"ContainerStarted","Data":"56ef48d195e31d14786a9b7a559900b6864a196e1f422a8ac18cf4c0305344b6"}
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.783348 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" event={"ID":"6202d20a-5377-4876-a2cb-700e4b0ccf60","Type":"ContainerStarted","Data":"4aed2bcaa5571a1fd3ac99d2f12ef3532412c0de60864d24581ea1002cf0d8c1"}
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.783362 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" event={"ID":"69faad41-a827-4fd3-b43e-036297dc2c9f","Type":"ContainerStarted","Data":"80c6f0893dabe999197748c8e16143445b20dd3101831aae70776808e6ab35fa"}
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.783387 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-c7v75"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.783412 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv"
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.783424 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" event={"ID":"69faad41-a827-4fd3-b43e-036297dc2c9f","Type":"ContainerStarted","Data":"6adad7d5702b7f3841a544aa3960d52c0f1eb70c482375eab20326e4b9768c82"}
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.783437 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" event={"ID":"e8238e53-faf8-4dc1-a726-76368f0319be","Type":"ContainerStarted","Data":"222efd062506b9f3ecc0569190926354a4c20ed7a93ea82673be84e3330a2b15"}
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.783449 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" event={"ID":"5cffffee-7f08-4c94-bf30-15419e5e91e2","Type":"ContainerStarted","Data":"90966bf3517878455b66eb60139ad143f7324e0b820269fd4604f5340ca8f3aa"}
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.783462 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf" event={"ID":"0a45ab31-45db-4069-8da2-4c53cd2689ca","Type":"ContainerStarted","Data":"8e8dded4dbb95ac4bb3f08651683e96a690616c2cc6435bc966e386b7329819b"}
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.783474 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-v9zjk" event={"ID":"25aecd46-36d8-4ee9-bae5-4731e91b5e74","Type":"ContainerStarted","Data":"0624b7150ec7906b71bdc0ff643159d70421894a9ef67e25510752551dbf6b5b"}
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.783489 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fq9w8" event={"ID":"31a14c62-0956-4d44-98f4-724da42f4e78","Type":"ContainerDied","Data":"9e832a5d92172d0bd0349656172e9d9846bca0cd58dea45b7cf92c96c3418b56"}
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.783521 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fq9w8" event={"ID":"31a14c62-0956-4d44-98f4-724da42f4e78","Type":"ContainerStarted","Data":"2ccd8595b0668e9bff68c0dae03d440c156b3f59f144faa538129bcdbb314086"}
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.783534 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xnvrp" event={"ID":"e0750137-60d4-4ea8-be2b-097a562d4b2f","Type":"ContainerStarted","Data":"1b08b859a51b722c4767418467ba01ea6007347a41c69b74465e46d09e0bdebb"}
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.783547 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xnvrp" event={"ID":"e0750137-60d4-4ea8-be2b-097a562d4b2f","Type":"ContainerStarted","Data":"f898bf64a1369f9e7899c14009aab9fd4950ed784a4dc8974a55692c7c70a46b"}
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.809452 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq"
Nov 25 09:05:57 crc kubenswrapper[4687]: E1125 09:05:57.812227 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:05:58.312212263 +0000 UTC m=+153.365851981 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:57 crc kubenswrapper[4687]: W1125 09:05:57.867538 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb968a427_bae8_41af_a3b7_1ef108cefb0d.slice/crio-94a84f067797ac22bf7e1aa3067b708f15a5f28604c3452624e901b9f44a7c10 WatchSource:0}: Error finding container 94a84f067797ac22bf7e1aa3067b708f15a5f28604c3452624e901b9f44a7c10: Status 404 returned error can't find the container with id 94a84f067797ac22bf7e1aa3067b708f15a5f28604c3452624e901b9f44a7c10
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.912869 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:05:57 crc kubenswrapper[4687]: E1125 09:05:57.913675 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:05:58.413659107 +0000 UTC m=+153.467298815 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.934125 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zcrgz"]
Nov 25 09:05:57 crc kubenswrapper[4687]: I1125 09:05:57.956250 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-76gwh"]
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.017364 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq"
Nov 25 09:05:58 crc kubenswrapper[4687]: E1125 09:05:58.018044 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:05:58.518026949 +0000 UTC m=+153.571666667 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.022069 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-8x7wj"]
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.119439 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:05:58 crc kubenswrapper[4687]: E1125 09:05:58.119648 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:05:58.619615607 +0000 UTC m=+153.673255325 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.119805 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq"
Nov 25 09:05:58 crc kubenswrapper[4687]: E1125 09:05:58.120230 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:05:58.620202503 +0000 UTC m=+153.673842221 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.184167 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zdldw"]
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.221430 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:05:58 crc kubenswrapper[4687]: E1125 09:05:58.221725 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:05:58.721710108 +0000 UTC m=+153.775349826 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.323260 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq"
Nov 25 09:05:58 crc kubenswrapper[4687]: E1125 09:05:58.323605 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:05:58.823585874 +0000 UTC m=+153.877225592 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.427297 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:05:58 crc kubenswrapper[4687]: E1125 09:05:58.427660 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:05:58.927642557 +0000 UTC m=+153.981282275 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.530209 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq"
Nov 25 09:05:58 crc kubenswrapper[4687]: E1125 09:05:58.530877 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:05:59.030862359 +0000 UTC m=+154.084502077 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.547315 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgkfv"]
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.567860 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-tw4q9"]
Nov 25 09:05:58 crc kubenswrapper[4687]: W1125 09:05:58.602354 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode34299cb_9872_41f2_b75b_5cfc1d309a56.slice/crio-bcb974aac8657db8abf0001483eb022f3ec674afc47084246f6af73a0a404c30 WatchSource:0}: Error finding container bcb974aac8657db8abf0001483eb022f3ec674afc47084246f6af73a0a404c30: Status 404 returned error can't find the container with id bcb974aac8657db8abf0001483eb022f3ec674afc47084246f6af73a0a404c30
Nov 25 09:05:58 crc kubenswrapper[4687]: W1125 09:05:58.625667 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod44b4be83_3df2_46e2_bf91_9afa9992ea95.slice/crio-e901f163109046052eeb13c7037e66e09280ad8694955e8b561f35d13bf21789 WatchSource:0}: Error finding container e901f163109046052eeb13c7037e66e09280ad8694955e8b561f35d13bf21789: Status 404 returned error can't find the container with id e901f163109046052eeb13c7037e66e09280ad8694955e8b561f35d13bf21789
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.633036 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:05:58 crc kubenswrapper[4687]: E1125 09:05:58.633597 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:05:59.133580687 +0000 UTC m=+154.187220405 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.656249 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-xmxvb" podStartSLOduration=129.656225997 podStartE2EDuration="2m9.656225997s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:58.619748351 +0000 UTC m=+153.673388099" watchObservedRunningTime="2025-11-25 09:05:58.656225997 +0000 UTC m=+153.709865715"
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.743044 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq"
Nov 25 09:05:58 crc kubenswrapper[4687]: E1125 09:05:58.743465 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:05:59.243450404 +0000 UTC m=+154.297090122 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.744206 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-ntss9"]
Nov 25 09:05:58 crc kubenswrapper[4687]: W1125 09:05:58.781858 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9c3bce2f_351a_4a20_95d0_404b4c117ee3.slice/crio-0866c30cba5573ea49478a67234df3fd10e869b6f445135f88c97ce814ced32a WatchSource:0}: Error finding container 0866c30cba5573ea49478a67234df3fd10e869b6f445135f88c97ce814ced32a: Status 404 returned error can't find the container with id 0866c30cba5573ea49478a67234df3fd10e869b6f445135f88c97ce814ced32a
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.787488 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-jc6zc" event={"ID":"13f09c5e-2d79-4ff8-8f26-4bcee751eed6","Type":"ContainerStarted","Data":"a249579e8b9d79ec62eebc8173eb6da2268ad7e317812f15bf596865e49bdcb6"}
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.791595 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" event={"ID":"e8238e53-faf8-4dc1-a726-76368f0319be","Type":"ContainerStarted","Data":"0ea82645e2ce7736fbe36319f6d2b6cc5f765d2635c42e6900ce0c6232fa07a0"}
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.794680 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zcrgz" event={"ID":"24cfae3a-2b46-4e2e-93ec-7e7dc089af87","Type":"ContainerStarted","Data":"89f50f1c470f14d6e2e5881c47f0972d3469a60f3462cfa893eb7294d3ca89d2"}
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.794718 4687 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-2qjbv container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body=
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.794760 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" podUID="e8238e53-faf8-4dc1-a726-76368f0319be" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused"
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.801261 4687 generic.go:334] "Generic (PLEG): container finished" podID="6202d20a-5377-4876-a2cb-700e4b0ccf60" containerID="9fb3a63587f74f9421e0a377c8a3cd173686051ca0b8aee324ac77a2c6c60aa5" exitCode=0
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.801784 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" event={"ID":"6202d20a-5377-4876-a2cb-700e4b0ccf60","Type":"ContainerDied","Data":"9fb3a63587f74f9421e0a377c8a3cd173686051ca0b8aee324ac77a2c6c60aa5"}
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.813213 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-zgglt" event={"ID":"fbe26cf0-9829-4e16-b4c6-24484b1e678a","Type":"ContainerStarted","Data":"5b9dd483812d8925685db1cf75821344c58502eb1d554335a02f4a2da1e14ae1"}
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.822763 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-v9zjk" event={"ID":"25aecd46-36d8-4ee9-bae5-4731e91b5e74","Type":"ContainerStarted","Data":"75545d8fbfb76b98dcb56c85d17eb494ee57ab6509073cfaaa969e64fddf11f0"}
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.823803 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-p2nkd" event={"ID":"9aee268c-0a3a-4f5b-8449-c71d027e9d97","Type":"ContainerStarted","Data":"7580bcbca79e3a3d40228d4336049d71036ea9fab9e35fe7d6da5d70a3776bb1"}
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.826676 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zdldw" event={"ID":"7566f9bb-44c5-4c74-b1d9-d8c1e3c61206","Type":"ContainerStarted","Data":"ac2fe1a88a792b6ef4a9becf74e300a36bc7b83358d7df45a79084059de51558"}
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.829111 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xnvrp" event={"ID":"e0750137-60d4-4ea8-be2b-097a562d4b2f","Type":"ContainerStarted","Data":"eaedf2b419f6ecdf4399b11bc1e9b5682d4245164efcad58ff26b7b059764885"}
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.834260 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-vftf5" event={"ID":"b307de3a-d363-4077-933e-68f51ae40158","Type":"ContainerStarted","Data":"c5b877b268dc6f27868e5dd73d209f7f3b839f072a43625d529f4467c9e58f90"}
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.842053 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-76gwh" event={"ID":"4cf28713-ccae-4b06-bdb6-f52d0426ac47","Type":"ContainerStarted","Data":"45b4f9ea6f53bf728ee5504342b3b827658115b01cdb7992d01a215b110e6423"}
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.845078 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:05:58 crc kubenswrapper[4687]: E1125 09:05:58.845468 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:05:59.345451454 +0000 UTC m=+154.399091172 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.847286 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-tw4q9" event={"ID":"44b4be83-3df2-46e2-bf91-9afa9992ea95","Type":"ContainerStarted","Data":"e901f163109046052eeb13c7037e66e09280ad8694955e8b561f35d13bf21789"}
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.890972 4687 generic.go:334] "Generic (PLEG): container finished" podID="5cffffee-7f08-4c94-bf30-15419e5e91e2" containerID="167490eb65bb017b4ae07d3e334ce4537639a64a74a652e41bb528ddeaf42106" exitCode=0
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.891141 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" event={"ID":"5cffffee-7f08-4c94-bf30-15419e5e91e2","Type":"ContainerDied","Data":"167490eb65bb017b4ae07d3e334ce4537639a64a74a652e41bb528ddeaf42106"}
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.928544 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6qjt6" event={"ID":"efdd22a2-19ab-40f6-8a0d-fdbfdc3dbff3","Type":"ContainerStarted","Data":"f6ba99d55076dfcfd900f8486df0beacc21a8c0297660ce88ace7c1ac5f76477"}
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.947151 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq"
Nov 25 09:05:58 crc kubenswrapper[4687]: E1125 09:05:58.950595 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:05:59.450580075 +0000 UTC m=+154.504219793 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.952772 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ztsnq" event={"ID":"b968a427-bae8-41af-a3b7-1ef108cefb0d","Type":"ContainerStarted","Data":"94a84f067797ac22bf7e1aa3067b708f15a5f28604c3452624e901b9f44a7c10"}
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.955909 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgkfv" event={"ID":"e34299cb-9872-41f2-b75b-5cfc1d309a56","Type":"ContainerStarted","Data":"bcb974aac8657db8abf0001483eb022f3ec674afc47084246f6af73a0a404c30"}
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.973556 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb" event={"ID":"e9c2451a-27de-43d2-a9e8-90f33ab30ce1","Type":"ContainerStarted","Data":"6a5376562b1cd908106a8ee8fd63a44f85be07e7b7f781753e2f8489618c305b"}
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.985148 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jqcw" event={"ID":"3e26524b-c8ef-47d0-8a03-ee87cb3e06fe","Type":"ContainerStarted","Data":"f2f84364e59be53006bbd323f359fdfbadc6f4fba3da764aca48eb314b90b831"}
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.987255 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf" event={"ID":"0a45ab31-45db-4069-8da2-4c53cd2689ca","Type":"ContainerStarted","Data":"ca13687ccde42f08ef64b146be839dfd60f53ef94ba6c6a0e0c513ae5a5c322a"}
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.988100 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf"
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.989775 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-8x7wj" event={"ID":"b1bbe405-c2f4-40b2-9569-486ba688d2d2","Type":"ContainerStarted","Data":"ea3d2abf8994f88f664e1021cb163216dea3abbe1bbba1eefda14e5a997066c3"}
Nov 25 09:05:58 crc kubenswrapper[4687]: I1125 09:05:58.997384 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-c27v5" event={"ID":"f9bd19fb-a226-460a-8164-5538673a3783","Type":"ContainerStarted","Data":"1ff2f3c23ae2c0a7e437f5ddb22561d41e67bf8a632b8d857ee0f1f418a76dca"}
Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.003358 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-mclwx"]
Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.006909 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bskt8" event={"ID":"35177658-25cb-4a51-a14e-5fb925283ac8","Type":"ContainerStarted","Data":"d415985ac311845e0f8f9f0e571b778ac8bb17903cd2f074e514663914046ed7"}
Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.014391 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-9r7dh" event={"ID":"f8521f89-513c-41da-897e-9ea6fc278c21","Type":"ContainerStarted","Data":"7c66808ec11b5c602d04f533d13c54668a589bad1dff54cd46c8bd19bf80ebf0"}
Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.019252 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-6gb62" event={"ID":"9f8678ea-08b9-4dcd-b70d-e19800c697e3","Type":"ContainerStarted","Data":"38173cbb32287dba5cc3e7cf4ff099203996cb8a2598f851a2cefe9d85fe0b58"}
Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.019784 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-6gb62"
Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.022919 4687 patch_prober.go:28] interesting pod/console-operator-58897d9998-6gb62 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body=
Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.023020 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-6gb62" podUID="9f8678ea-08b9-4dcd-b70d-e19800c697e3" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused"
Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.026083 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-5jnmz" event={"ID":"a3ec0e37-3e46-44e2-97ac-e7a81b8255b5","Type":"ContainerStarted","Data":"3e5140b9d99c3981882fee4c67b34f7242a0dd706cb95ecfb8500371df90802b"}
Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.026254 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" podStartSLOduration=130.026138525 podStartE2EDuration="2m10.026138525s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:59.023592557 +0000 UTC m=+154.077232285" watchObservedRunningTime="2025-11-25 09:05:59.026138525 +0000 UTC m=+154.079778243"
Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.027363 4687 patch_prober.go:28] interesting pod/downloads-7954f5f757-xmxvb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body=
Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.027424 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xmxvb" podUID="057a2f29-f877-40e5-9a25-d1a2d26918ad" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused"
Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.034360 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-c7v75"
Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.051370 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:05:59 crc kubenswrapper[4687]: E1125 09:05:59.055352 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:05:59.555326977 +0000 UTC m=+154.608966695 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:59 crc kubenswrapper[4687]: W1125 09:05:59.083794 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcca1b796_de4d_40d2_ab8b_de2fa132f859.slice/crio-85e165d443c7dabe2e427c70baca8a285f980967781b49538fc8bfae77a3ba2b WatchSource:0}: Error finding container 85e165d443c7dabe2e427c70baca8a285f980967781b49538fc8bfae77a3ba2b: Status 404 returned error can't find the container with id 85e165d443c7dabe2e427c70baca8a285f980967781b49538fc8bfae77a3ba2b
Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.157280 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq"
Nov 25 09:05:59 crc kubenswrapper[4687]: E1125 09:05:59.158218 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:05:59.658205689 +0000 UTC m=+154.711845397 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.187946 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-cpbmq"]
Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.204617 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5tvvm"]
Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.211038 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vtn5b"]
Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.225663 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw"]
Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.235151 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-cx6sn"]
Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.258553 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:05:59 crc kubenswrapper[4687]: E1125 09:05:59.258860 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:05:59.758833682 +0000 UTC m=+154.812473400 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.261873 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx"] Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.265946 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-v6xqm"] Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.270625 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-j4vxg"] Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.272028 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgs76"] Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.295098 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-gm5pk"] Nov 25 09:05:59 crc kubenswrapper[4687]: W1125 09:05:59.300178 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod917c4fd5_c131_4c47_b6df_b245776c017a.slice/crio-818d9bbc72177e8768f0ff896b39abe21ed8d9a285160be1492b729c3b205b09 WatchSource:0}: Error finding container 818d9bbc72177e8768f0ff896b39abe21ed8d9a285160be1492b729c3b205b09: Status 404 returned error can't find the container with id 818d9bbc72177e8768f0ff896b39abe21ed8d9a285160be1492b729c3b205b09 Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.302920 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-kvhfg"] Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.361231 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:05:59 crc kubenswrapper[4687]: E1125 09:05:59.361625 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:05:59.861611502 +0000 UTC m=+154.915251220 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:05:59 crc kubenswrapper[4687]: W1125 09:05:59.433604 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8172038a_a448_40d0_834a_6f059d3f7738.slice/crio-aaaabd30bc206bc8a62716e077464d4f935cefc4f1ffa665acd618d9ffd4aedc WatchSource:0}: Error finding container aaaabd30bc206bc8a62716e077464d4f935cefc4f1ffa665acd618d9ffd4aedc: Status 404 returned error can't find the container with id aaaabd30bc206bc8a62716e077464d4f935cefc4f1ffa665acd618d9ffd4aedc Nov 25 09:05:59 crc kubenswrapper[4687]: W1125 09:05:59.454183 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod74dce578_27e9_4dc2_ac45_9019de15d559.slice/crio-54744748a74072d07e80658ebf8d210a24c15c39803798db0d63f3598cc665c8 WatchSource:0}: Error finding container 54744748a74072d07e80658ebf8d210a24c15c39803798db0d63f3598cc665c8: Status 404 returned error can't find the container with id 54744748a74072d07e80658ebf8d210a24c15c39803798db0d63f3598cc665c8 Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.458892 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" podStartSLOduration=130.458873165 podStartE2EDuration="2m10.458873165s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:59.393578437 +0000 UTC m=+154.447218155" watchObservedRunningTime="2025-11-25 09:05:59.458873165 +0000 UTC m=+154.512512893" Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.463030 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:05:59 crc kubenswrapper[4687]: E1125 09:05:59.463405 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:05:59.963376994 +0000 UTC m=+155.017016712 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.465810 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf" Nov 25 09:05:59 crc kubenswrapper[4687]: W1125 09:05:59.525699 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd31aac44_f947_4eae_811c_9c0822a157d0.slice/crio-f7014fdfd1337028d98c2d8dc8ec25c66c18ffc0299542f2999c6b5025bd4389 WatchSource:0}: Error finding container f7014fdfd1337028d98c2d8dc8ec25c66c18ffc0299542f2999c6b5025bd4389: Status 404 returned error can't find the container with id f7014fdfd1337028d98c2d8dc8ec25c66c18ffc0299542f2999c6b5025bd4389 Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.564236 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:05:59 crc kubenswrapper[4687]: E1125 09:05:59.564633 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:00.064618513 +0000 UTC m=+155.118258231 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.651716 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-zgglt" podStartSLOduration=130.651687607 podStartE2EDuration="2m10.651687607s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:59.613968489 +0000 UTC m=+154.667608197" watchObservedRunningTime="2025-11-25 09:05:59.651687607 +0000 UTC m=+154.705327325" Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.653074 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf" podStartSLOduration=129.653066814 podStartE2EDuration="2m9.653066814s" podCreationTimestamp="2025-11-25 09:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:59.650462805 +0000 UTC m=+154.704102523" watchObservedRunningTime="2025-11-25 09:05:59.653066814 +0000 UTC m=+154.706706532" Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.666705 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:05:59 crc kubenswrapper[4687]: E1125 09:05:59.667107 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:00.167052624 +0000 UTC m=+155.220692342 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.678368 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bskt8" podStartSLOduration=130.678340533 podStartE2EDuration="2m10.678340533s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:59.671380919 +0000 UTC m=+154.725020637" watchObservedRunningTime="2025-11-25 09:05:59.678340533 +0000 UTC m=+154.731980251" Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.765375 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-6gb62" podStartSLOduration=130.765358955 podStartE2EDuration="2m10.765358955s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:59.764384789 +0000 UTC m=+154.818024517" watchObservedRunningTime="2025-11-25 09:05:59.765358955 +0000 UTC m=+154.818998673" Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.768548 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:05:59 crc kubenswrapper[4687]: E1125 09:05:59.768862 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:00.268845557 +0000 UTC m=+155.322485265 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.872284 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:05:59 crc kubenswrapper[4687]: E1125 09:05:59.877114 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:00.377070541 +0000 UTC m=+155.430710259 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.912027 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-8x7wj" podStartSLOduration=129.912010846 podStartE2EDuration="2m9.912010846s" podCreationTimestamp="2025-11-25 09:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:59.845881585 +0000 UTC m=+154.899521303" watchObservedRunningTime="2025-11-25 09:05:59.912010846 +0000 UTC m=+154.965650564" Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.919493 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6qjt6" podStartSLOduration=130.919473993 podStartE2EDuration="2m10.919473993s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:59.912579571 +0000 UTC m=+154.966219289" watchObservedRunningTime="2025-11-25 09:05:59.919473993 +0000 UTC m=+154.973113711" Nov 25 09:05:59 crc kubenswrapper[4687]: I1125 09:05:59.971931 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-c27v5" podStartSLOduration=130.97191566 podStartE2EDuration="2m10.97191566s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:59.968900701 +0000 UTC m=+155.022540429" watchObservedRunningTime="2025-11-25 09:05:59.97191566 +0000 UTC m=+155.025555378" Nov 25 09:05:59 crc kubenswrapper[4687]: 
I1125 09:05:59.973675 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:05:59 crc kubenswrapper[4687]: E1125 09:05:59.973979 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:00.473967555 +0000 UTC m=+155.527607263 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.056987 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-v6xqm" event={"ID":"8172038a-a448-40d0-834a-6f059d3f7738","Type":"ContainerStarted","Data":"aaaabd30bc206bc8a62716e077464d4f935cefc4f1ffa665acd618d9ffd4aedc"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.075984 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:00 crc kubenswrapper[4687]: E1125 09:06:00.076578 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:00.57656315 +0000 UTC m=+155.630202868 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.081585 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-jc6zc" event={"ID":"13f09c5e-2d79-4ff8-8f26-4bcee751eed6","Type":"ContainerStarted","Data":"6df2c1746787b077daedee26f3d0687ccf4ce3d2b0d66352e3c842fa12e1f705"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.090265 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntss9" event={"ID":"9c3bce2f-351a-4a20-95d0-404b4c117ee3","Type":"ContainerStarted","Data":"ec60cf1872774d7c75126c5f2f2df854acf3741cc32d2e0c60bbe1b710628d4a"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.090327 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntss9" event={"ID":"9c3bce2f-351a-4a20-95d0-404b4c117ee3","Type":"ContainerStarted","Data":"0866c30cba5573ea49478a67234df3fd10e869b6f445135f88c97ce814ced32a"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.125653 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-jc6zc" podStartSLOduration=6.125633508 podStartE2EDuration="6.125633508s" podCreationTimestamp="2025-11-25 09:05:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:00.124490138 +0000 UTC m=+155.178129856" watchObservedRunningTime="2025-11-25 09:06:00.125633508 +0000 UTC m=+155.179273226" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.125943 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xnvrp" podStartSLOduration=131.125939106 podStartE2EDuration="2m11.125939106s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:00.000890817 +0000 UTC m=+155.054530535" watchObservedRunningTime="2025-11-25 09:06:00.125939106 +0000 UTC m=+155.179578824" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.126142 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-9r7dh" event={"ID":"f8521f89-513c-41da-897e-9ea6fc278c21","Type":"ContainerStarted","Data":"8364295cd46b4cdc0a421acade76a9133766d7fdf94d66163eb1888546ed0e53"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.126218 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-9r7dh" event={"ID":"f8521f89-513c-41da-897e-9ea6fc278c21","Type":"ContainerStarted","Data":"18b103dfdf9f7a0e99ab2c2bd417af2a92bc0d59b5e0103e8d8cbb0ba9e4d6da"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.154457 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ingress/router-default-5444994796-p2nkd" event={"ID":"9aee268c-0a3a-4f5b-8449-c71d027e9d97","Type":"ContainerStarted","Data":"86d28aaaf7e2e267dd4a16eb34b36533df4bee3d9a5df8b5307cc8d12a4861c1"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.163012 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-9r7dh" podStartSLOduration=130.162994727 podStartE2EDuration="2m10.162994727s" podCreationTimestamp="2025-11-25 09:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:00.161894948 +0000 UTC m=+155.215534656" watchObservedRunningTime="2025-11-25 09:06:00.162994727 +0000 UTC m=+155.216634435" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.178406 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:00 crc kubenswrapper[4687]: E1125 09:06:00.189109 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:00.689084337 +0000 UTC m=+155.742724055 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.198364 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk" event={"ID":"d31aac44-f947-4eae-811c-9c0822a157d0","Type":"ContainerStarted","Data":"f7014fdfd1337028d98c2d8dc8ec25c66c18ffc0299542f2999c6b5025bd4389"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.217431 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5tvvm" event={"ID":"6dfd611b-2429-43f4-8ccb-9a5b2138a4df","Type":"ContainerStarted","Data":"eae4f3941296de459135bd84ec61549e7b18dce3089ebd2c5e0aa1c1163e5616"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.217470 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5tvvm" event={"ID":"6dfd611b-2429-43f4-8ccb-9a5b2138a4df","Type":"ContainerStarted","Data":"a35ab3439ab1217983c626942c814f7d9d06f66575420b6132d6513c9e0b5307"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.218389 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5tvvm" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.224656 4687 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-5tvvm container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe 
status=failure output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" start-of-body= Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.224718 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5tvvm" podUID="6dfd611b-2429-43f4-8ccb-9a5b2138a4df" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.229554 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" event={"ID":"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6","Type":"ContainerStarted","Data":"064d2ebf86192a7e07b7ac4ec8cec32adcb7130f6e304df89976ae2faf245c2f"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.257640 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-8x7wj" event={"ID":"b1bbe405-c2f4-40b2-9569-486ba688d2d2","Type":"ContainerStarted","Data":"2ba770f583d312bdf5e1b9a4663d24cd9cb6624167754955d4ca7a5f699f1be6"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.273986 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zcrgz" event={"ID":"24cfae3a-2b46-4e2e-93ec-7e7dc089af87","Type":"ContainerStarted","Data":"6f529d6f4833eebfccbd51779bdb682d8bf872b724ce784967b8eb77a7a7028f"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.278286 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mclwx" event={"ID":"cca1b796-de4d-40d2-ab8b-de2fa132f859","Type":"ContainerStarted","Data":"f8e8f38884312bb75a2dd488d73ffa08cba2ffd50e0e8c9e8be4519dacc5f814"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.278345 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mclwx" event={"ID":"cca1b796-de4d-40d2-ab8b-de2fa132f859","Type":"ContainerStarted","Data":"85e165d443c7dabe2e427c70baca8a285f980967781b49538fc8bfae77a3ba2b"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.288952 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgkfv" event={"ID":"e34299cb-9872-41f2-b75b-5cfc1d309a56","Type":"ContainerStarted","Data":"8c9d53d696308bab0402373d7829391ec330c79524a6b919afedcbfcbb463752"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.294397 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:00 crc kubenswrapper[4687]: E1125 09:06:00.295576 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:00.795560255 +0000 UTC m=+155.849199973 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.310670 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-p2nkd" podStartSLOduration=130.310652584 podStartE2EDuration="2m10.310652584s" podCreationTimestamp="2025-11-25 09:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:00.228030647 +0000 UTC m=+155.281670365" watchObservedRunningTime="2025-11-25 09:06:00.310652584 +0000 UTC m=+155.364292302" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.342721 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgs76" event={"ID":"76160710-34d0-4e08-8e47-9c61a730db60","Type":"ContainerStarted","Data":"cbcc9ad91218dd99911e97f41501449e21652f6e739954276f42070e584ca16a"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.343530 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5tvvm" podStartSLOduration=130.343495703 podStartE2EDuration="2m10.343495703s" podCreationTimestamp="2025-11-25 09:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:00.311455035 +0000 UTC m=+155.365094753" watchObservedRunningTime="2025-11-25 09:06:00.343495703 +0000 UTC m=+155.397135421" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.343855 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgkfv" podStartSLOduration=130.343850382 podStartE2EDuration="2m10.343850382s" podCreationTimestamp="2025-11-25 09:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:00.342185939 +0000 UTC m=+155.395825657" watchObservedRunningTime="2025-11-25 09:06:00.343850382 +0000 UTC m=+155.397490100" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.358963 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx" event={"ID":"996f4a60-505f-425c-ac79-c8df8033ed57","Type":"ContainerStarted","Data":"2943b9094e4acdb4629b51e9caa32a08890d904317202422129b1c399f8ce5d4"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.359042 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.361990 4687 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zzgvx container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.38:5443/healthz\": dial tcp 10.217.0.38:5443: connect: connection refused" start-of-body= Nov 25 09:06:00 crc 
kubenswrapper[4687]: I1125 09:06:00.362055 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx" podUID="996f4a60-505f-425c-ac79-c8df8033ed57" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.38:5443/healthz\": dial tcp 10.217.0.38:5443: connect: connection refused" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.363403 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ztsnq" event={"ID":"b968a427-bae8-41af-a3b7-1ef108cefb0d","Type":"ContainerStarted","Data":"fc9e372b2b7dfc33ae7b5bb2447f7267ad231770dfee0372b434a1b8bbf272d6"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.363469 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ztsnq" event={"ID":"b968a427-bae8-41af-a3b7-1ef108cefb0d","Type":"ContainerStarted","Data":"2aeabf857488d9be9a39aee7594594e5f7acda5ec4e1826070286b4cd3a11211"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.364107 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ztsnq" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.396749 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.396848 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-zcrgz" podStartSLOduration=130.396828784 podStartE2EDuration="2m10.396828784s" podCreationTimestamp="2025-11-25 09:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:00.395804387 +0000 UTC m=+155.449444115" watchObservedRunningTime="2025-11-25 09:06:00.396828784 +0000 UTC m=+155.450468502" Nov 25 09:06:00 crc kubenswrapper[4687]: E1125 09:06:00.399111 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:00.899084274 +0000 UTC m=+155.952723992 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.429374 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zdldw" event={"ID":"7566f9bb-44c5-4c74-b1d9-d8c1e3c61206","Type":"ContainerStarted","Data":"cc365f2d92a5bf8d48911f0bc8a7e06e570f8bb8456b251b9c6db65649a17fa0"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.452775 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx" podStartSLOduration=130.452759124 podStartE2EDuration="2m10.452759124s" podCreationTimestamp="2025-11-25 09:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:00.452089706 +0000 UTC m=+155.505729434" watchObservedRunningTime="2025-11-25 09:06:00.452759124 +0000 UTC m=+155.506398842" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.468237 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jqcw" event={"ID":"3e26524b-c8ef-47d0-8a03-ee87cb3e06fe","Type":"ContainerStarted","Data":"cf5068936db298983c93adfb9821df0f29de441e801d9dc58d2a1d66245c9f96"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.501149 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:00 crc kubenswrapper[4687]: E1125 09:06:00.502354 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:01.002321726 +0000 UTC m=+156.055961444 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.503728 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw" event={"ID":"3c01bb95-1d19-435a-9090-da58d2110922","Type":"ContainerStarted","Data":"c6afbb0b44001da091e7cabde2d909027467b5e4218991e9938654c39c287fd7"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.516263 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ztsnq" podStartSLOduration=130.516242934 podStartE2EDuration="2m10.516242934s" podCreationTimestamp="2025-11-25 09:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:00.51417346 +0000 UTC m=+155.567813178" watchObservedRunningTime="2025-11-25 09:06:00.516242934 +0000 UTC m=+155.569882652" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.534779 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-v9zjk" event={"ID":"25aecd46-36d8-4ee9-bae5-4731e91b5e74","Type":"ContainerStarted","Data":"0b8fe893393c4b22ee60f8f5d53f74c9c7bcdedefe8ee962a0753c98293cffde"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.539632 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-p2nkd" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.553894 4687 patch_prober.go:28] interesting pod/router-default-5444994796-p2nkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:06:00 crc kubenswrapper[4687]: [-]has-synced failed: reason withheld Nov 25 09:06:00 crc kubenswrapper[4687]: [+]process-running ok Nov 25 09:06:00 crc kubenswrapper[4687]: healthz check failed Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.553952 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p2nkd" podUID="9aee268c-0a3a-4f5b-8449-c71d027e9d97" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.563634 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-kvhfg" event={"ID":"5de513b2-cac7-4fe5-b121-91bdb67abbd2","Type":"ContainerStarted","Data":"0b0d7b25528b956b7f5205c7a6a8709acb5855a4da00b4f4ff9cb3a8da72642a"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.564404 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vtn5b" event={"ID":"3f949ab7-cad8-4a34-b419-42a3dd61a4fc","Type":"ContainerStarted","Data":"07dc6a1ea5a7b319e4dc9d09ddc3d02a9808f54a6839ddd7bcf104103c65d4db"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.565297 4687 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-vftf5" event={"ID":"b307de3a-d363-4077-933e-68f51ae40158","Type":"ContainerStarted","Data":"d182ea586d3182bd87e2a0bae4378ed1fcc1485f91967f9d9f8847802f0a2c1b"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.566563 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-cpbmq" event={"ID":"917c4fd5-c131-4c47-b6df-b245776c017a","Type":"ContainerStarted","Data":"818d9bbc72177e8768f0ff896b39abe21ed8d9a285160be1492b729c3b205b09"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.594924 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb" event={"ID":"e9c2451a-27de-43d2-a9e8-90f33ab30ce1","Type":"ContainerStarted","Data":"e58dd7dee7a86bf96b8c3ca5dc9309f364dbdaf629e157f8aa2a96cc19ca4ed4"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.609389 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fq9w8" event={"ID":"31a14c62-0956-4d44-98f4-724da42f4e78","Type":"ContainerStarted","Data":"0bc0ae6e53df2497dc7efe75ec5830e8c09a5b73be845c72bb902a959be62cfd"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.610197 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fq9w8" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.611346 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:00 crc kubenswrapper[4687]: E1125 09:06:00.614421 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:01.114407261 +0000 UTC m=+156.168046979 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.623658 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw" podStartSLOduration=131.623639376 podStartE2EDuration="2m11.623639376s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:00.622001543 +0000 UTC m=+155.675641261" watchObservedRunningTime="2025-11-25 09:06:00.623639376 +0000 UTC m=+155.677279094" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.647352 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zdldw" podStartSLOduration=130.647327483 podStartE2EDuration="2m10.647327483s" podCreationTimestamp="2025-11-25 09:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:00.576676283 +0000 UTC m=+155.630316001" watchObservedRunningTime="2025-11-25 09:06:00.647327483 +0000 UTC m=+155.700967201" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.661926 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-76gwh" event={"ID":"4cf28713-ccae-4b06-bdb6-f52d0426ac47","Type":"ContainerStarted","Data":"20fb5230e36dd4ab2adf83cda062f55c53259a860db8bba614a2232499a36e91"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.685413 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-tw4q9" event={"ID":"44b4be83-3df2-46e2-bf91-9afa9992ea95","Type":"ContainerStarted","Data":"d93c7a438744b2305f1d9a8656e44684f05563dc14c23f032b3539645d7ed324"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.686121 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jqcw" podStartSLOduration=130.686081628 podStartE2EDuration="2m10.686081628s" podCreationTimestamp="2025-11-25 09:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:00.685651787 +0000 UTC m=+155.739291505" watchObservedRunningTime="2025-11-25 09:06:00.686081628 +0000 UTC m=+155.739721346" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.713132 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:00 crc kubenswrapper[4687]: E1125 09:06:00.722377 4687 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:01.222338397 +0000 UTC m=+156.275978115 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.758241 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-76gwh" podStartSLOduration=131.758221807 podStartE2EDuration="2m11.758221807s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:00.755907526 +0000 UTC m=+155.809547264" watchObservedRunningTime="2025-11-25 09:06:00.758221807 +0000 UTC m=+155.811861525" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.760541 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" event={"ID":"5cffffee-7f08-4c94-bf30-15419e5e91e2","Type":"ContainerStarted","Data":"ecb522d45e5dc2031222295e858a8ca76a8001321ec15e18618e5efd974c5a1e"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.808672 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fq9w8" podStartSLOduration=131.808644111 podStartE2EDuration="2m11.808644111s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:00.808054776 +0000 UTC m=+155.861694504" watchObservedRunningTime="2025-11-25 09:06:00.808644111 +0000 UTC m=+155.862283829" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.818834 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:00 crc kubenswrapper[4687]: E1125 09:06:00.821553 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:01.321495531 +0000 UTC m=+156.375135249 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.842808 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-j4vxg" event={"ID":"74dce578-27e9-4dc2-ac45-9019de15d559","Type":"ContainerStarted","Data":"54744748a74072d07e80658ebf8d210a24c15c39803798db0d63f3598cc665c8"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.892153 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-5jnmz" event={"ID":"a3ec0e37-3e46-44e2-97ac-e7a81b8255b5","Type":"ContainerStarted","Data":"1ebbb3c6d88b636c565eae6144c0a9851fa19b21433432bcf5cc8bfdac407470"} Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.919638 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:00 crc kubenswrapper[4687]: E1125 09:06:00.920320 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:01.420303216 +0000 UTC m=+156.473942934 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.920333 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.968084 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-vftf5" podStartSLOduration=130.96806228 podStartE2EDuration="2m10.96806228s" podCreationTimestamp="2025-11-25 09:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:00.864397917 +0000 UTC m=+155.918037635" watchObservedRunningTime="2025-11-25 09:06:00.96806228 +0000 UTC m=+156.021702008" Nov 25 09:06:00 crc kubenswrapper[4687]: I1125 09:06:00.983886 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-6gb62" Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.021607 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:01 crc kubenswrapper[4687]: E1125 09:06:01.029904 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:01.529884826 +0000 UTC m=+156.583524724 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.054279 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-68pmb" podStartSLOduration=132.05425631 podStartE2EDuration="2m12.05425631s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:00.968941392 +0000 UTC m=+156.022581110" watchObservedRunningTime="2025-11-25 09:06:01.05425631 +0000 UTC m=+156.107896028" Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.056239 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-v9zjk" podStartSLOduration=131.056231833 podStartE2EDuration="2m11.056231833s" podCreationTimestamp="2025-11-25 09:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:01.05387478 +0000 UTC m=+156.107514498" watchObservedRunningTime="2025-11-25 09:06:01.056231833 +0000 UTC m=+156.109871551" Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.127652 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:01 crc kubenswrapper[4687]: E1125 09:06:01.128182 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:01.628161886 +0000 UTC m=+156.681801604 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.185554 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-tw4q9" podStartSLOduration=7.185541164 podStartE2EDuration="7.185541164s" podCreationTimestamp="2025-11-25 09:05:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:01.183953402 +0000 UTC m=+156.237593120" watchObservedRunningTime="2025-11-25 09:06:01.185541164 +0000 UTC m=+156.239180882" Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.230462 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:01 crc kubenswrapper[4687]: E1125 09:06:01.230840 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:01.730825913 +0000 UTC m=+156.784465631 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.331873 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:01 crc kubenswrapper[4687]: E1125 09:06:01.332618 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:01.832603196 +0000 UTC m=+156.886242904 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.371313 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-5jnmz" podStartSLOduration=132.3712926 podStartE2EDuration="2m12.3712926s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:01.3183917 +0000 UTC m=+156.372031408" watchObservedRunningTime="2025-11-25 09:06:01.3712926 +0000 UTC m=+156.424932318" Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.406561 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" podStartSLOduration=131.406547082 podStartE2EDuration="2m11.406547082s" podCreationTimestamp="2025-11-25 09:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:01.401039727 +0000 UTC m=+156.454679445" watchObservedRunningTime="2025-11-25 09:06:01.406547082 +0000 UTC m=+156.460186800" Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.433371 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:01 crc kubenswrapper[4687]: E1125 09:06:01.433731 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:01.933714601 +0000 UTC m=+156.987354309 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.541126 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:01 crc kubenswrapper[4687]: E1125 09:06:01.541330 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:02.041299588 +0000 UTC m=+157.094939316 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.541622 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:01 crc kubenswrapper[4687]: E1125 09:06:01.541977 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:02.041966046 +0000 UTC m=+157.095605764 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.550338 4687 patch_prober.go:28] interesting pod/router-default-5444994796-p2nkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:06:01 crc kubenswrapper[4687]: [-]has-synced failed: reason withheld Nov 25 09:06:01 crc kubenswrapper[4687]: [+]process-running ok Nov 25 09:06:01 crc kubenswrapper[4687]: healthz check failed Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.550409 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p2nkd" podUID="9aee268c-0a3a-4f5b-8449-c71d027e9d97" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.645214 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:01 crc kubenswrapper[4687]: E1125 09:06:01.645408 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:02.145369462 +0000 UTC m=+157.199009180 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.645594 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:01 crc kubenswrapper[4687]: E1125 09:06:01.648876 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:02.148850494 +0000 UTC m=+157.202490212 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.697068 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.697602 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.750194 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:01 crc kubenswrapper[4687]: E1125 09:06:01.753162 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:02.253141834 +0000 UTC m=+157.306781552 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.854116 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:01 crc kubenswrapper[4687]: E1125 09:06:01.854551 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:02.354535057 +0000 UTC m=+157.408174775 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.899056 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk" event={"ID":"d31aac44-f947-4eae-811c-9c0822a157d0","Type":"ContainerStarted","Data":"3a2293f04b12d5c3b1c82bb4e57ad8d5a6ae17331129ef3244d477b16aa7e7e9"} Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.899352 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk" Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.900941 4687 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-gm5pk container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/healthz\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.900986 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk" podUID="d31aac44-f947-4eae-811c-9c0822a157d0" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.21:8080/healthz\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.901955 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mclwx" event={"ID":"cca1b796-de4d-40d2-ab8b-de2fa132f859","Type":"ContainerStarted","Data":"f84a8b34a5bc504b8e85c38134f397605a415b74feef49cbd16cb8aca1e8591d"} Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.903331 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx" event={"ID":"996f4a60-505f-425c-ac79-c8df8033ed57","Type":"ContainerStarted","Data":"8df631fbfe98b629970a4f614f1eaa1a643af518c5c8d4aa19d11ee208685ddd"} Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.904566 4687 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zzgvx container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.38:5443/healthz\": dial tcp 10.217.0.38:5443: connect: connection refused" start-of-body= Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.904706 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx" podUID="996f4a60-505f-425c-ac79-c8df8033ed57" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.38:5443/healthz\": dial tcp 10.217.0.38:5443: connect: connection refused" Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.905385 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-kvhfg" event={"ID":"5de513b2-cac7-4fe5-b121-91bdb67abbd2","Type":"ContainerStarted","Data":"6696e161b88a1058f1c43c3222adf18a3c7e788a3846bf574f491d823c37dfac"} Nov 25 09:06:01 crc 
kubenswrapper[4687]: I1125 09:06:01.905518 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-kvhfg" event={"ID":"5de513b2-cac7-4fe5-b121-91bdb67abbd2","Type":"ContainerStarted","Data":"e63a7f02c4fcb502c41a310f98456133727b5e45bf8563736d6bda550c2e638d"} Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.906987 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntss9" event={"ID":"9c3bce2f-351a-4a20-95d0-404b4c117ee3","Type":"ContainerStarted","Data":"7ee6054aea65bd46e5868fe20ca0e2b872fcc8193926745db2fd01f101f5f6a1"} Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.908976 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-5jnmz" event={"ID":"a3ec0e37-3e46-44e2-97ac-e7a81b8255b5","Type":"ContainerStarted","Data":"58b68f3e8a0799587a6ec4c49fae7d6cfa3a81d9330602fb9006045ec49c37f7"} Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.910708 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-v6xqm" event={"ID":"8172038a-a448-40d0-834a-6f059d3f7738","Type":"ContainerStarted","Data":"176090e4a5c21532a9301c491a9dd2005bac51cb00f0ddc7689f5b2eb22fe3fc"} Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.910771 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-v6xqm" event={"ID":"8172038a-a448-40d0-834a-6f059d3f7738","Type":"ContainerStarted","Data":"be5b98ea8b44378187f59b139e096efbf2dfa879a2dad9ae16223bb897768c27"} Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.914404 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" event={"ID":"6202d20a-5377-4876-a2cb-700e4b0ccf60","Type":"ContainerStarted","Data":"c7bf5deddf3d10a9a810d7e8f6bdd68615be823324545d01bd261a396a31be96"} Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.914437 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" event={"ID":"6202d20a-5377-4876-a2cb-700e4b0ccf60","Type":"ContainerStarted","Data":"d220be851ccbce341e2285282e6b6268604a49ff12679e8354eb86bca586978b"} Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.916230 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw" event={"ID":"3c01bb95-1d19-435a-9090-da58d2110922","Type":"ContainerStarted","Data":"133002c858286be0689e24f27dbf1e85be50b37602392c9a2138d17f16040629"} Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.918471 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-cpbmq" event={"ID":"917c4fd5-c131-4c47-b6df-b245776c017a","Type":"ContainerStarted","Data":"aae065350c36450e3deaa4dfc9137fbdc963d35e6e34d137877e5aa8b932b3fd"} Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.918518 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-cpbmq" event={"ID":"917c4fd5-c131-4c47-b6df-b245776c017a","Type":"ContainerStarted","Data":"1b43bdd2cf23656f23a8f23ef4ca4e4c45321018efe0d15d04145d2643ec7984"} Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.918570 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-cpbmq" Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.920107 4687 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vtn5b" event={"ID":"3f949ab7-cad8-4a34-b419-42a3dd61a4fc","Type":"ContainerStarted","Data":"db55387ae042108f149c3058d2ec111f08efb2821dae9f86e31e80eda5bd496a"} Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.920465 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vtn5b" Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.921595 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" event={"ID":"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6","Type":"ContainerStarted","Data":"5f28f852599184e1f8113ebcedaeb7cb9d1604637c9f7d14f48f752afa2386b4"} Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.923082 4687 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-vtn5b container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:8443/healthz\": dial tcp 10.217.0.36:8443: connect: connection refused" start-of-body= Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.923117 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vtn5b" podUID="3f949ab7-cad8-4a34-b419-42a3dd61a4fc" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.36:8443/healthz\": dial tcp 10.217.0.36:8443: connect: connection refused" Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.923666 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-j4vxg" event={"ID":"74dce578-27e9-4dc2-ac45-9019de15d559","Type":"ContainerStarted","Data":"d4366f587a6b080a65fd212ef9b1e1b78aa36666314753532fc40c19e1cb081b"} Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.925532 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgs76" event={"ID":"76160710-34d0-4e08-8e47-9c61a730db60","Type":"ContainerStarted","Data":"d6ee7db30e2da34d611d18d30d04bba868ba6830500a878cda00fa67836c5494"} Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.955095 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:01 crc kubenswrapper[4687]: E1125 09:06:01.955266 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:02.455240632 +0000 UTC m=+157.508880350 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.955331 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:01 crc kubenswrapper[4687]: E1125 09:06:01.955635 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:02.455623241 +0000 UTC m=+157.509262959 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.960908 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-v6xqm" podStartSLOduration=131.960894981 podStartE2EDuration="2m11.960894981s" podCreationTimestamp="2025-11-25 09:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:01.959605537 +0000 UTC m=+157.013245255" watchObservedRunningTime="2025-11-25 09:06:01.960894981 +0000 UTC m=+157.014534699" Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.961775 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk" podStartSLOduration=131.961769885 podStartE2EDuration="2m11.961769885s" podCreationTimestamp="2025-11-25 09:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:01.93022742 +0000 UTC m=+156.983867138" watchObservedRunningTime="2025-11-25 09:06:01.961769885 +0000 UTC m=+157.015409613" Nov 25 09:06:01 crc kubenswrapper[4687]: I1125 09:06:01.981318 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5tvvm" Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.014214 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vtn5b" podStartSLOduration=132.014197211 podStartE2EDuration="2m12.014197211s" podCreationTimestamp="2025-11-25 09:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 
UTC" observedRunningTime="2025-11-25 09:06:02.012382754 +0000 UTC m=+157.066022472" watchObservedRunningTime="2025-11-25 09:06:02.014197211 +0000 UTC m=+157.067836929" Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.044298 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-cpbmq" podStartSLOduration=8.044281537 podStartE2EDuration="8.044281537s" podCreationTimestamp="2025-11-25 09:05:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:02.041288598 +0000 UTC m=+157.094928326" watchObservedRunningTime="2025-11-25 09:06:02.044281537 +0000 UTC m=+157.097921255" Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.056771 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:02 crc kubenswrapper[4687]: E1125 09:06:02.057149 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:02.557116617 +0000 UTC m=+157.610756345 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.057628 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:02 crc kubenswrapper[4687]: E1125 09:06:02.062830 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:02.562802478 +0000 UTC m=+157.616442256 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.074799 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fq9w8" Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.130060 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mclwx" podStartSLOduration=133.130039677 podStartE2EDuration="2m13.130039677s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:02.08405084 +0000 UTC m=+157.137690548" watchObservedRunningTime="2025-11-25 09:06:02.130039677 +0000 UTC m=+157.183679395" Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.159520 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:02 crc kubenswrapper[4687]: E1125 09:06:02.159719 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:02.659688551 +0000 UTC m=+157.713328269 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.159779 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:02 crc kubenswrapper[4687]: E1125 09:06:02.160093 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:02.660084212 +0000 UTC m=+157.713723940 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.206007 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-kvhfg" podStartSLOduration=132.205986046 podStartE2EDuration="2m12.205986046s" podCreationTimestamp="2025-11-25 09:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:02.131002732 +0000 UTC m=+157.184642450" watchObservedRunningTime="2025-11-25 09:06:02.205986046 +0000 UTC m=+157.259625764" Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.262083 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:02 crc kubenswrapper[4687]: E1125 09:06:02.263082 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:02.763061997 +0000 UTC m=+157.816701715 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.302761 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntss9" podStartSLOduration=132.302743457 podStartE2EDuration="2m12.302743457s" podCreationTimestamp="2025-11-25 09:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:02.197524293 +0000 UTC m=+157.251164011" watchObservedRunningTime="2025-11-25 09:06:02.302743457 +0000 UTC m=+157.356383175" Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.364596 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:02 crc kubenswrapper[4687]: E1125 09:06:02.364959 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:02.864943263 +0000 UTC m=+157.918582981 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.380700 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgs76" podStartSLOduration=133.380682059 podStartE2EDuration="2m13.380682059s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:02.305991642 +0000 UTC m=+157.359631360" watchObservedRunningTime="2025-11-25 09:06:02.380682059 +0000 UTC m=+157.434321777" Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.466594 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:02 crc kubenswrapper[4687]: E1125 09:06:02.466932 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:02.966917721 +0000 UTC m=+158.020557439 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.487351 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-j4vxg" podStartSLOduration=132.487334851 podStartE2EDuration="2m12.487334851s" podCreationTimestamp="2025-11-25 09:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:02.388771484 +0000 UTC m=+157.442411202" watchObservedRunningTime="2025-11-25 09:06:02.487334851 +0000 UTC m=+157.540974569" Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.549850 4687 patch_prober.go:28] interesting pod/router-default-5444994796-p2nkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:06:02 crc kubenswrapper[4687]: [-]has-synced failed: reason withheld Nov 25 09:06:02 crc kubenswrapper[4687]: [+]process-running ok Nov 25 09:06:02 crc kubenswrapper[4687]: healthz check failed Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.549898 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p2nkd" podUID="9aee268c-0a3a-4f5b-8449-c71d027e9d97" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.551114 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" podStartSLOduration=133.551097258 podStartE2EDuration="2m13.551097258s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:02.489862328 +0000 UTC m=+157.543502046" watchObservedRunningTime="2025-11-25 09:06:02.551097258 +0000 UTC m=+157.604736976" Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.567810 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:02 crc kubenswrapper[4687]: E1125 09:06:02.568061 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:03.068050267 +0000 UTC m=+158.121689985 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.663845 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.669296 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:02 crc kubenswrapper[4687]: E1125 09:06:02.669621 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:03.169589604 +0000 UTC m=+158.223229332 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.777436 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:02 crc kubenswrapper[4687]: E1125 09:06:02.777803 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:03.277788307 +0000 UTC m=+158.331428025 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.878222 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:02 crc kubenswrapper[4687]: E1125 09:06:02.878399 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:03.378374488 +0000 UTC m=+158.432014206 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.878887 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:02 crc kubenswrapper[4687]: E1125 09:06:02.879246 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:03.379229211 +0000 UTC m=+158.432868939 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.931481 4687 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-gm5pk container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/healthz\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.931543 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk" podUID="d31aac44-f947-4eae-811c-9c0822a157d0" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.21:8080/healthz\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.945095 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vtn5b" Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.952397 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bvkgm" Nov 25 09:06:02 crc kubenswrapper[4687]: I1125 09:06:02.979574 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:02 crc kubenswrapper[4687]: E1125 09:06:02.979850 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:03.479834264 +0000 UTC m=+158.533473982 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.080994 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:03 crc kubenswrapper[4687]: E1125 09:06:03.086616 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-25 09:06:03.586598348 +0000 UTC m=+158.640238066 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.183076 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:03 crc kubenswrapper[4687]: E1125 09:06:03.183396 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:03.683380589 +0000 UTC m=+158.737020307 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.284770 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:03 crc kubenswrapper[4687]: E1125 09:06:03.285094 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:03.785082571 +0000 UTC m=+158.838722289 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.339169 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-gpptd"] Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.343752 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zzgvx" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.343879 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gpptd" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.350761 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.365631 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gpptd"] Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.387205 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:03 crc kubenswrapper[4687]: E1125 09:06:03.390684 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:03.890660184 +0000 UTC m=+158.944299892 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.493368 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.493660 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ccde1639-05be-47c5-93b8-c1eb83167814-utilities\") pod \"certified-operators-gpptd\" (UID: \"ccde1639-05be-47c5-93b8-c1eb83167814\") " pod="openshift-marketplace/certified-operators-gpptd" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.493689 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ccde1639-05be-47c5-93b8-c1eb83167814-catalog-content\") pod \"certified-operators-gpptd\" (UID: \"ccde1639-05be-47c5-93b8-c1eb83167814\") " pod="openshift-marketplace/certified-operators-gpptd" Nov 25 09:06:03 crc kubenswrapper[4687]: E1125 09:06:03.493806 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:03.993787953 +0000 UTC m=+159.047427671 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.493856 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-888bv\" (UniqueName: \"kubernetes.io/projected/ccde1639-05be-47c5-93b8-c1eb83167814-kube-api-access-888bv\") pod \"certified-operators-gpptd\" (UID: \"ccde1639-05be-47c5-93b8-c1eb83167814\") " pod="openshift-marketplace/certified-operators-gpptd" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.537132 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-59vsx"] Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.538312 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-59vsx" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.543364 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.549366 4687 patch_prober.go:28] interesting pod/router-default-5444994796-p2nkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:06:03 crc kubenswrapper[4687]: [-]has-synced failed: reason withheld Nov 25 09:06:03 crc kubenswrapper[4687]: [+]process-running ok Nov 25 09:06:03 crc kubenswrapper[4687]: healthz check failed Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.549452 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p2nkd" podUID="9aee268c-0a3a-4f5b-8449-c71d027e9d97" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.564959 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-59vsx"] Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.595022 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.595221 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ccde1639-05be-47c5-93b8-c1eb83167814-catalog-content\") pod \"certified-operators-gpptd\" (UID: \"ccde1639-05be-47c5-93b8-c1eb83167814\") " pod="openshift-marketplace/certified-operators-gpptd" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.595256 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-888bv\" (UniqueName: \"kubernetes.io/projected/ccde1639-05be-47c5-93b8-c1eb83167814-kube-api-access-888bv\") pod \"certified-operators-gpptd\" (UID: \"ccde1639-05be-47c5-93b8-c1eb83167814\") " pod="openshift-marketplace/certified-operators-gpptd" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.595343 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ccde1639-05be-47c5-93b8-c1eb83167814-utilities\") pod \"certified-operators-gpptd\" (UID: \"ccde1639-05be-47c5-93b8-c1eb83167814\") " pod="openshift-marketplace/certified-operators-gpptd" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.595741 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ccde1639-05be-47c5-93b8-c1eb83167814-utilities\") pod \"certified-operators-gpptd\" (UID: \"ccde1639-05be-47c5-93b8-c1eb83167814\") " pod="openshift-marketplace/certified-operators-gpptd" Nov 25 09:06:03 crc kubenswrapper[4687]: E1125 09:06:03.595821 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-25 09:06:04.095804663 +0000 UTC m=+159.149444411 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.596058 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ccde1639-05be-47c5-93b8-c1eb83167814-catalog-content\") pod \"certified-operators-gpptd\" (UID: \"ccde1639-05be-47c5-93b8-c1eb83167814\") " pod="openshift-marketplace/certified-operators-gpptd" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.626284 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-888bv\" (UniqueName: \"kubernetes.io/projected/ccde1639-05be-47c5-93b8-c1eb83167814-kube-api-access-888bv\") pod \"certified-operators-gpptd\" (UID: \"ccde1639-05be-47c5-93b8-c1eb83167814\") " pod="openshift-marketplace/certified-operators-gpptd" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.679057 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gpptd" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.697033 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8cf4b36-d7aa-4d9a-bc65-a893435ca244-utilities\") pod \"community-operators-59vsx\" (UID: \"d8cf4b36-d7aa-4d9a-bc65-a893435ca244\") " pod="openshift-marketplace/community-operators-59vsx" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.697161 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.697206 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8cf4b36-d7aa-4d9a-bc65-a893435ca244-catalog-content\") pod \"community-operators-59vsx\" (UID: \"d8cf4b36-d7aa-4d9a-bc65-a893435ca244\") " pod="openshift-marketplace/community-operators-59vsx" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.697230 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sl985\" (UniqueName: \"kubernetes.io/projected/d8cf4b36-d7aa-4d9a-bc65-a893435ca244-kube-api-access-sl985\") pod \"community-operators-59vsx\" (UID: \"d8cf4b36-d7aa-4d9a-bc65-a893435ca244\") " pod="openshift-marketplace/community-operators-59vsx" Nov 25 09:06:03 crc kubenswrapper[4687]: E1125 09:06:03.697560 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-25 09:06:04.197545634 +0000 UTC m=+159.251185352 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.753403 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-w8k2g"] Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.754404 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w8k2g" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.785271 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w8k2g"] Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.798180 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.798668 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8cf4b36-d7aa-4d9a-bc65-a893435ca244-catalog-content\") pod \"community-operators-59vsx\" (UID: \"d8cf4b36-d7aa-4d9a-bc65-a893435ca244\") " pod="openshift-marketplace/community-operators-59vsx" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.798693 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sl985\" (UniqueName: \"kubernetes.io/projected/d8cf4b36-d7aa-4d9a-bc65-a893435ca244-kube-api-access-sl985\") pod \"community-operators-59vsx\" (UID: \"d8cf4b36-d7aa-4d9a-bc65-a893435ca244\") " pod="openshift-marketplace/community-operators-59vsx" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.798728 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8cf4b36-d7aa-4d9a-bc65-a893435ca244-utilities\") pod \"community-operators-59vsx\" (UID: \"d8cf4b36-d7aa-4d9a-bc65-a893435ca244\") " pod="openshift-marketplace/community-operators-59vsx" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.799097 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8cf4b36-d7aa-4d9a-bc65-a893435ca244-utilities\") pod \"community-operators-59vsx\" (UID: \"d8cf4b36-d7aa-4d9a-bc65-a893435ca244\") " pod="openshift-marketplace/community-operators-59vsx" Nov 25 09:06:03 crc kubenswrapper[4687]: E1125 09:06:03.799388 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:04.299374609 +0000 UTC m=+159.353014327 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.802743 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8cf4b36-d7aa-4d9a-bc65-a893435ca244-catalog-content\") pod \"community-operators-59vsx\" (UID: \"d8cf4b36-d7aa-4d9a-bc65-a893435ca244\") " pod="openshift-marketplace/community-operators-59vsx" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.845981 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sl985\" (UniqueName: \"kubernetes.io/projected/d8cf4b36-d7aa-4d9a-bc65-a893435ca244-kube-api-access-sl985\") pod \"community-operators-59vsx\" (UID: \"d8cf4b36-d7aa-4d9a-bc65-a893435ca244\") " pod="openshift-marketplace/community-operators-59vsx" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.852811 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-59vsx" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.899573 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87d111a8-6128-4b3c-a3de-2143ab121856-catalog-content\") pod \"certified-operators-w8k2g\" (UID: \"87d111a8-6128-4b3c-a3de-2143ab121856\") " pod="openshift-marketplace/certified-operators-w8k2g" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.899634 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.899665 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7rsq\" (UniqueName: \"kubernetes.io/projected/87d111a8-6128-4b3c-a3de-2143ab121856-kube-api-access-h7rsq\") pod \"certified-operators-w8k2g\" (UID: \"87d111a8-6128-4b3c-a3de-2143ab121856\") " pod="openshift-marketplace/certified-operators-w8k2g" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.899690 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87d111a8-6128-4b3c-a3de-2143ab121856-utilities\") pod \"certified-operators-w8k2g\" (UID: \"87d111a8-6128-4b3c-a3de-2143ab121856\") " pod="openshift-marketplace/certified-operators-w8k2g" Nov 25 09:06:03 crc kubenswrapper[4687]: E1125 09:06:03.899989 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:04.399978301 +0000 UTC m=+159.453618019 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.951299 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-49wvg"] Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.952344 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-49wvg" Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.959580 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" event={"ID":"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6","Type":"ContainerStarted","Data":"4f5083acd221b2ca0e3c142d049ac2306c40a44964299e826cff21f8cb75fa5d"} Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.959616 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" event={"ID":"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6","Type":"ContainerStarted","Data":"2d54ebce1bb7762211d28e95b12c77c2c16e91bdc62475d65f173c82e937b4ce"} Nov 25 09:06:03 crc kubenswrapper[4687]: I1125 09:06:03.981398 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-49wvg"] Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.000927 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.001180 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87d111a8-6128-4b3c-a3de-2143ab121856-catalog-content\") pod \"certified-operators-w8k2g\" (UID: \"87d111a8-6128-4b3c-a3de-2143ab121856\") " pod="openshift-marketplace/certified-operators-w8k2g" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.001245 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7rsq\" (UniqueName: \"kubernetes.io/projected/87d111a8-6128-4b3c-a3de-2143ab121856-kube-api-access-h7rsq\") pod \"certified-operators-w8k2g\" (UID: \"87d111a8-6128-4b3c-a3de-2143ab121856\") " pod="openshift-marketplace/certified-operators-w8k2g" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.001272 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87d111a8-6128-4b3c-a3de-2143ab121856-utilities\") pod \"certified-operators-w8k2g\" (UID: \"87d111a8-6128-4b3c-a3de-2143ab121856\") " pod="openshift-marketplace/certified-operators-w8k2g" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.001724 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87d111a8-6128-4b3c-a3de-2143ab121856-utilities\") pod \"certified-operators-w8k2g\" (UID: \"87d111a8-6128-4b3c-a3de-2143ab121856\") " 
pod="openshift-marketplace/certified-operators-w8k2g" Nov 25 09:06:04 crc kubenswrapper[4687]: E1125 09:06:04.001798 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:04.501783265 +0000 UTC m=+159.555422983 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.001985 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87d111a8-6128-4b3c-a3de-2143ab121856-catalog-content\") pod \"certified-operators-w8k2g\" (UID: \"87d111a8-6128-4b3c-a3de-2143ab121856\") " pod="openshift-marketplace/certified-operators-w8k2g" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.039366 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7rsq\" (UniqueName: \"kubernetes.io/projected/87d111a8-6128-4b3c-a3de-2143ab121856-kube-api-access-h7rsq\") pod \"certified-operators-w8k2g\" (UID: \"87d111a8-6128-4b3c-a3de-2143ab121856\") " pod="openshift-marketplace/certified-operators-w8k2g" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.099758 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-w8k2g" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.116169 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/614abb7b-e915-4b2a-9f57-bbd8ec4d2433-utilities\") pod \"community-operators-49wvg\" (UID: \"614abb7b-e915-4b2a-9f57-bbd8ec4d2433\") " pod="openshift-marketplace/community-operators-49wvg" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.116328 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8bnj\" (UniqueName: \"kubernetes.io/projected/614abb7b-e915-4b2a-9f57-bbd8ec4d2433-kube-api-access-s8bnj\") pod \"community-operators-49wvg\" (UID: \"614abb7b-e915-4b2a-9f57-bbd8ec4d2433\") " pod="openshift-marketplace/community-operators-49wvg" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.116372 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.116393 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/614abb7b-e915-4b2a-9f57-bbd8ec4d2433-catalog-content\") pod \"community-operators-49wvg\" (UID: \"614abb7b-e915-4b2a-9f57-bbd8ec4d2433\") " pod="openshift-marketplace/community-operators-49wvg" Nov 25 09:06:04 crc kubenswrapper[4687]: E1125 09:06:04.119525 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:04.61949384 +0000 UTC m=+159.673133558 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.217377 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.217657 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8bnj\" (UniqueName: \"kubernetes.io/projected/614abb7b-e915-4b2a-9f57-bbd8ec4d2433-kube-api-access-s8bnj\") pod \"community-operators-49wvg\" (UID: \"614abb7b-e915-4b2a-9f57-bbd8ec4d2433\") " pod="openshift-marketplace/community-operators-49wvg" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.217694 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/614abb7b-e915-4b2a-9f57-bbd8ec4d2433-catalog-content\") pod \"community-operators-49wvg\" (UID: \"614abb7b-e915-4b2a-9f57-bbd8ec4d2433\") " pod="openshift-marketplace/community-operators-49wvg" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.217747 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/614abb7b-e915-4b2a-9f57-bbd8ec4d2433-utilities\") pod \"community-operators-49wvg\" (UID: \"614abb7b-e915-4b2a-9f57-bbd8ec4d2433\") " pod="openshift-marketplace/community-operators-49wvg" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.218181 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/614abb7b-e915-4b2a-9f57-bbd8ec4d2433-utilities\") pod \"community-operators-49wvg\" (UID: \"614abb7b-e915-4b2a-9f57-bbd8ec4d2433\") " pod="openshift-marketplace/community-operators-49wvg" Nov 25 09:06:04 crc kubenswrapper[4687]: E1125 09:06:04.218260 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:04.718243863 +0000 UTC m=+159.771883581 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.218744 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/614abb7b-e915-4b2a-9f57-bbd8ec4d2433-catalog-content\") pod \"community-operators-49wvg\" (UID: \"614abb7b-e915-4b2a-9f57-bbd8ec4d2433\") " pod="openshift-marketplace/community-operators-49wvg" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.239947 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8bnj\" (UniqueName: \"kubernetes.io/projected/614abb7b-e915-4b2a-9f57-bbd8ec4d2433-kube-api-access-s8bnj\") pod \"community-operators-49wvg\" (UID: \"614abb7b-e915-4b2a-9f57-bbd8ec4d2433\") " pod="openshift-marketplace/community-operators-49wvg" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.277928 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-49wvg" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.321382 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:04 crc kubenswrapper[4687]: E1125 09:06:04.321907 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:04.821890235 +0000 UTC m=+159.875529953 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.398785 4687 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.423761 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:04 crc kubenswrapper[4687]: E1125 09:06:04.424134 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:04.92412028 +0000 UTC m=+159.977759998 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.425032 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gpptd"] Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.477271 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w8k2g"] Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.525600 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:04 crc kubenswrapper[4687]: E1125 09:06:04.526002 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:05.025987116 +0000 UTC m=+160.079626834 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.527164 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-59vsx"] Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.543672 4687 patch_prober.go:28] interesting pod/router-default-5444994796-p2nkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:06:04 crc kubenswrapper[4687]: [-]has-synced failed: reason withheld Nov 25 09:06:04 crc kubenswrapper[4687]: [+]process-running ok Nov 25 09:06:04 crc kubenswrapper[4687]: healthz check failed Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.543726 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p2nkd" podUID="9aee268c-0a3a-4f5b-8449-c71d027e9d97" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.629774 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.629948 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-49wvg"] Nov 25 09:06:04 crc kubenswrapper[4687]: E1125 09:06:04.630475 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:05.130456001 +0000 UTC m=+160.184095719 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.726038 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.726915 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.729909 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.731639 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:04 crc kubenswrapper[4687]: E1125 09:06:04.731906 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:05.231895304 +0000 UTC m=+160.285535012 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.735708 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.736945 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.833416 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:04 crc kubenswrapper[4687]: E1125 09:06:04.833589 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:05.333562825 +0000 UTC m=+160.387202543 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.833665 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.833724 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.833766 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 09:06:04 crc kubenswrapper[4687]: E1125 09:06:04.834100 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:05.334090719 +0000 UTC m=+160.387730437 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.934336 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:04 crc kubenswrapper[4687]: E1125 09:06:04.934568 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:06:05.434539567 +0000 UTC m=+160.488179285 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.934712 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.934769 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.934824 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.934836 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 09:06:04 crc kubenswrapper[4687]: E1125 09:06:04.935119 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:06:05.435109231 +0000 UTC m=+160.488748959 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2d9pq" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.948795 4687 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-25T09:06:04.398830391Z","Handler":null,"Name":""} Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.952416 4687 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.952451 4687 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.957474 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.966784 4687 generic.go:334] "Generic (PLEG): container finished" podID="d8cf4b36-d7aa-4d9a-bc65-a893435ca244" containerID="857eb552f61c3a9e3b15c4de08ef1a5fbd819b6ce644f30c70142d13fcd3151c" exitCode=0 Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.966869 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59vsx" event={"ID":"d8cf4b36-d7aa-4d9a-bc65-a893435ca244","Type":"ContainerDied","Data":"857eb552f61c3a9e3b15c4de08ef1a5fbd819b6ce644f30c70142d13fcd3151c"} Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.966901 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59vsx" event={"ID":"d8cf4b36-d7aa-4d9a-bc65-a893435ca244","Type":"ContainerStarted","Data":"ae8ee9a877e2f11d25fe74131e3fd0f30d39fc2dbbb9abf1c4236afa85b3d7cd"} Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.970448 4687 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.970475 4687 generic.go:334] "Generic (PLEG): container finished" podID="87d111a8-6128-4b3c-a3de-2143ab121856" containerID="505e36a1ddd50f5f948b175dcfec7652a1f0a4a44a610044ab8d7e53ddebde5f" exitCode=0 Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.970516 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w8k2g" event={"ID":"87d111a8-6128-4b3c-a3de-2143ab121856","Type":"ContainerDied","Data":"505e36a1ddd50f5f948b175dcfec7652a1f0a4a44a610044ab8d7e53ddebde5f"} Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.971022 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w8k2g" 
event={"ID":"87d111a8-6128-4b3c-a3de-2143ab121856","Type":"ContainerStarted","Data":"a91af760a3b34c60d0e041244a410bd414868e2be57f3e1202f36296c8b8fd8e"} Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.980265 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" event={"ID":"6b169ba9-e2d8-4ca4-9341-dc8f2ae905d6","Type":"ContainerStarted","Data":"30493ebbafc6f9cb8f2fed3c7722ce06e4ff3ed0614ef8cede607a4909245053"} Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.989153 4687 generic.go:334] "Generic (PLEG): container finished" podID="3c01bb95-1d19-435a-9090-da58d2110922" containerID="133002c858286be0689e24f27dbf1e85be50b37602392c9a2138d17f16040629" exitCode=0 Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.989263 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw" event={"ID":"3c01bb95-1d19-435a-9090-da58d2110922","Type":"ContainerDied","Data":"133002c858286be0689e24f27dbf1e85be50b37602392c9a2138d17f16040629"} Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.993034 4687 generic.go:334] "Generic (PLEG): container finished" podID="ccde1639-05be-47c5-93b8-c1eb83167814" containerID="fcd4d6f8223e9a7875ef9260966f5ea643985175eaf394ede8a2e1633b5ccc70" exitCode=0 Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.993117 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gpptd" event={"ID":"ccde1639-05be-47c5-93b8-c1eb83167814","Type":"ContainerDied","Data":"fcd4d6f8223e9a7875ef9260966f5ea643985175eaf394ede8a2e1633b5ccc70"} Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.993154 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gpptd" event={"ID":"ccde1639-05be-47c5-93b8-c1eb83167814","Type":"ContainerStarted","Data":"c9314e9adddc4b5ef379d8445e61e07600dec023437595d42608733c8ea2cbbe"} Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.996095 4687 generic.go:334] "Generic (PLEG): container finished" podID="614abb7b-e915-4b2a-9f57-bbd8ec4d2433" containerID="a1d5e5d6a972bc114ab031efa9249a41212ac90bb25d9e2f9354257e24fa8c09" exitCode=0 Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.996470 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-49wvg" event={"ID":"614abb7b-e915-4b2a-9f57-bbd8ec4d2433","Type":"ContainerDied","Data":"a1d5e5d6a972bc114ab031efa9249a41212ac90bb25d9e2f9354257e24fa8c09"} Nov 25 09:06:04 crc kubenswrapper[4687]: I1125 09:06:04.996530 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-49wvg" event={"ID":"614abb7b-e915-4b2a-9f57-bbd8ec4d2433","Type":"ContainerStarted","Data":"e144e3ea88ec45b90ae92b8df951526923ed0d55d4f68acdae674f7666e5d58b"} Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.033938 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-cx6sn" podStartSLOduration=11.033919037 podStartE2EDuration="11.033919037s" podCreationTimestamp="2025-11-25 09:05:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:05.032750645 +0000 UTC m=+160.086390383" watchObservedRunningTime="2025-11-25 09:06:05.033919037 +0000 UTC m=+160.087558755" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.036332 4687 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.049031 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.138066 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.145258 4687 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.145300 4687 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.168213 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2d9pq\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.182126 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.225900 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.362665 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 25 09:06:05 crc kubenswrapper[4687]: W1125 09:06:05.373086 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod7cd7dccf_a26c_4cba_a5b7_86a1b7b7c29d.slice/crio-eb7b6c708132c10f2529fa689ca365f06089f38ada179b8005d8647c27b7974c WatchSource:0}: Error finding container eb7b6c708132c10f2529fa689ca365f06089f38ada179b8005d8647c27b7974c: Status 404 returned error can't find the container with id eb7b6c708132c10f2529fa689ca365f06089f38ada179b8005d8647c27b7974c Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.538694 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-nx4ps"] Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.539900 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nx4ps" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.541559 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.541939 4687 patch_prober.go:28] interesting pod/router-default-5444994796-p2nkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:06:05 crc kubenswrapper[4687]: [-]has-synced failed: reason withheld Nov 25 09:06:05 crc kubenswrapper[4687]: [+]process-running ok Nov 25 09:06:05 crc kubenswrapper[4687]: healthz check failed Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.542017 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p2nkd" podUID="9aee268c-0a3a-4f5b-8449-c71d027e9d97" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.544671 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nx4ps"] Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.637308 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2d9pq"] Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.650678 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3afcc926-b324-4fe9-933c-4918a88619d9-utilities\") pod \"redhat-marketplace-nx4ps\" (UID: \"3afcc926-b324-4fe9-933c-4918a88619d9\") " pod="openshift-marketplace/redhat-marketplace-nx4ps" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.650750 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vjttc\" (UniqueName: \"kubernetes.io/projected/3afcc926-b324-4fe9-933c-4918a88619d9-kube-api-access-vjttc\") pod \"redhat-marketplace-nx4ps\" (UID: \"3afcc926-b324-4fe9-933c-4918a88619d9\") " pod="openshift-marketplace/redhat-marketplace-nx4ps" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.650810 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/3afcc926-b324-4fe9-933c-4918a88619d9-catalog-content\") pod \"redhat-marketplace-nx4ps\" (UID: \"3afcc926-b324-4fe9-933c-4918a88619d9\") " pod="openshift-marketplace/redhat-marketplace-nx4ps" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.744543 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.752331 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3afcc926-b324-4fe9-933c-4918a88619d9-utilities\") pod \"redhat-marketplace-nx4ps\" (UID: \"3afcc926-b324-4fe9-933c-4918a88619d9\") " pod="openshift-marketplace/redhat-marketplace-nx4ps" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.752385 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vjttc\" (UniqueName: \"kubernetes.io/projected/3afcc926-b324-4fe9-933c-4918a88619d9-kube-api-access-vjttc\") pod \"redhat-marketplace-nx4ps\" (UID: \"3afcc926-b324-4fe9-933c-4918a88619d9\") " pod="openshift-marketplace/redhat-marketplace-nx4ps" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.752445 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3afcc926-b324-4fe9-933c-4918a88619d9-catalog-content\") pod \"redhat-marketplace-nx4ps\" (UID: \"3afcc926-b324-4fe9-933c-4918a88619d9\") " pod="openshift-marketplace/redhat-marketplace-nx4ps" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.753092 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3afcc926-b324-4fe9-933c-4918a88619d9-catalog-content\") pod \"redhat-marketplace-nx4ps\" (UID: \"3afcc926-b324-4fe9-933c-4918a88619d9\") " pod="openshift-marketplace/redhat-marketplace-nx4ps" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.753125 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3afcc926-b324-4fe9-933c-4918a88619d9-utilities\") pod \"redhat-marketplace-nx4ps\" (UID: \"3afcc926-b324-4fe9-933c-4918a88619d9\") " pod="openshift-marketplace/redhat-marketplace-nx4ps" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.770708 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vjttc\" (UniqueName: \"kubernetes.io/projected/3afcc926-b324-4fe9-933c-4918a88619d9-kube-api-access-vjttc\") pod \"redhat-marketplace-nx4ps\" (UID: \"3afcc926-b324-4fe9-933c-4918a88619d9\") " pod="openshift-marketplace/redhat-marketplace-nx4ps" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.861652 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nx4ps" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.932376 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qsgtx"] Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.933536 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qsgtx" Nov 25 09:06:05 crc kubenswrapper[4687]: I1125 09:06:05.955163 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qsgtx"] Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.003254 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" event={"ID":"5faad20b-1dd5-40df-8b0a-02890b547838","Type":"ContainerStarted","Data":"fbd85dc543be1475150091a84ed73f0363592d9ffd9113139278e64b065e77e1"} Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.003614 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" event={"ID":"5faad20b-1dd5-40df-8b0a-02890b547838","Type":"ContainerStarted","Data":"8b7abc5b9998d3193490851ed6dafee840c21add9c3523faf7b61a7fe89c096d"} Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.003666 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.006604 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d","Type":"ContainerStarted","Data":"b0983c7cabdab170a00e1b2766ddbfda41b3e8e9b2d5565e1087fb3e6f1e0cf9"} Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.006642 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d","Type":"ContainerStarted","Data":"eb7b6c708132c10f2529fa689ca365f06089f38ada179b8005d8647c27b7974c"} Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.021333 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" podStartSLOduration=137.021316284 podStartE2EDuration="2m17.021316284s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:06.020118182 +0000 UTC m=+161.073757920" watchObservedRunningTime="2025-11-25 09:06:06.021316284 +0000 UTC m=+161.074956002" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.038116 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=2.038097438 podStartE2EDuration="2.038097438s" podCreationTimestamp="2025-11-25 09:06:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:06.035768006 +0000 UTC m=+161.089407714" watchObservedRunningTime="2025-11-25 09:06:06.038097438 +0000 UTC m=+161.091737156" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.056256 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31c9ed99-0727-472a-8b48-285fdaaf558c-catalog-content\") pod \"redhat-marketplace-qsgtx\" (UID: \"31c9ed99-0727-472a-8b48-285fdaaf558c\") " pod="openshift-marketplace/redhat-marketplace-qsgtx" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.056350 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/31c9ed99-0727-472a-8b48-285fdaaf558c-utilities\") pod \"redhat-marketplace-qsgtx\" (UID: \"31c9ed99-0727-472a-8b48-285fdaaf558c\") " pod="openshift-marketplace/redhat-marketplace-qsgtx" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.056385 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bx6j\" (UniqueName: \"kubernetes.io/projected/31c9ed99-0727-472a-8b48-285fdaaf558c-kube-api-access-8bx6j\") pod \"redhat-marketplace-qsgtx\" (UID: \"31c9ed99-0727-472a-8b48-285fdaaf558c\") " pod="openshift-marketplace/redhat-marketplace-qsgtx" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.158137 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31c9ed99-0727-472a-8b48-285fdaaf558c-utilities\") pod \"redhat-marketplace-qsgtx\" (UID: \"31c9ed99-0727-472a-8b48-285fdaaf558c\") " pod="openshift-marketplace/redhat-marketplace-qsgtx" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.158209 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bx6j\" (UniqueName: \"kubernetes.io/projected/31c9ed99-0727-472a-8b48-285fdaaf558c-kube-api-access-8bx6j\") pod \"redhat-marketplace-qsgtx\" (UID: \"31c9ed99-0727-472a-8b48-285fdaaf558c\") " pod="openshift-marketplace/redhat-marketplace-qsgtx" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.158326 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31c9ed99-0727-472a-8b48-285fdaaf558c-catalog-content\") pod \"redhat-marketplace-qsgtx\" (UID: \"31c9ed99-0727-472a-8b48-285fdaaf558c\") " pod="openshift-marketplace/redhat-marketplace-qsgtx" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.159302 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31c9ed99-0727-472a-8b48-285fdaaf558c-catalog-content\") pod \"redhat-marketplace-qsgtx\" (UID: \"31c9ed99-0727-472a-8b48-285fdaaf558c\") " pod="openshift-marketplace/redhat-marketplace-qsgtx" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.159566 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31c9ed99-0727-472a-8b48-285fdaaf558c-utilities\") pod \"redhat-marketplace-qsgtx\" (UID: \"31c9ed99-0727-472a-8b48-285fdaaf558c\") " pod="openshift-marketplace/redhat-marketplace-qsgtx" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.172833 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nx4ps"] Nov 25 09:06:06 crc kubenswrapper[4687]: W1125 09:06:06.182640 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3afcc926_b324_4fe9_933c_4918a88619d9.slice/crio-a65cfaec9ca454e62cb265a7584b02d544deee67759f448c2a6b38ba29ccef89 WatchSource:0}: Error finding container a65cfaec9ca454e62cb265a7584b02d544deee67759f448c2a6b38ba29ccef89: Status 404 returned error can't find the container with id a65cfaec9ca454e62cb265a7584b02d544deee67759f448c2a6b38ba29ccef89 Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.192556 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bx6j\" (UniqueName: 
\"kubernetes.io/projected/31c9ed99-0727-472a-8b48-285fdaaf558c-kube-api-access-8bx6j\") pod \"redhat-marketplace-qsgtx\" (UID: \"31c9ed99-0727-472a-8b48-285fdaaf558c\") " pod="openshift-marketplace/redhat-marketplace-qsgtx" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.260525 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qsgtx" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.397646 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.501262 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.501311 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.507255 4687 patch_prober.go:28] interesting pod/downloads-7954f5f757-xmxvb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body= Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.507341 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xmxvb" podUID="057a2f29-f877-40e5-9a25-d1a2d26918ad" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.507535 4687 patch_prober.go:28] interesting pod/downloads-7954f5f757-xmxvb container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body= Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.507565 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-xmxvb" podUID="057a2f29-f877-40e5-9a25-d1a2d26918ad" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.511192 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.532175 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fqnbt"] Nov 25 09:06:06 crc kubenswrapper[4687]: E1125 09:06:06.532451 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c01bb95-1d19-435a-9090-da58d2110922" containerName="collect-profiles" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.532464 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c01bb95-1d19-435a-9090-da58d2110922" containerName="collect-profiles" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.532651 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c01bb95-1d19-435a-9090-da58d2110922" containerName="collect-profiles" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.533456 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fqnbt" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.538584 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.543242 4687 patch_prober.go:28] interesting pod/router-default-5444994796-p2nkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:06:06 crc kubenswrapper[4687]: [-]has-synced failed: reason withheld Nov 25 09:06:06 crc kubenswrapper[4687]: [+]process-running ok Nov 25 09:06:06 crc kubenswrapper[4687]: healthz check failed Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.543659 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p2nkd" podUID="9aee268c-0a3a-4f5b-8449-c71d027e9d97" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.545231 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fqnbt"] Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.567790 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3c01bb95-1d19-435a-9090-da58d2110922-secret-volume\") pod \"3c01bb95-1d19-435a-9090-da58d2110922\" (UID: \"3c01bb95-1d19-435a-9090-da58d2110922\") " Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.567829 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3c01bb95-1d19-435a-9090-da58d2110922-config-volume\") pod \"3c01bb95-1d19-435a-9090-da58d2110922\" (UID: \"3c01bb95-1d19-435a-9090-da58d2110922\") " Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.568263 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-smvsb\" (UniqueName: \"kubernetes.io/projected/3c01bb95-1d19-435a-9090-da58d2110922-kube-api-access-smvsb\") pod \"3c01bb95-1d19-435a-9090-da58d2110922\" (UID: \"3c01bb95-1d19-435a-9090-da58d2110922\") " Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.568631 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c01bb95-1d19-435a-9090-da58d2110922-config-volume" (OuterVolumeSpecName: "config-volume") pod "3c01bb95-1d19-435a-9090-da58d2110922" (UID: "3c01bb95-1d19-435a-9090-da58d2110922"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.574773 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c01bb95-1d19-435a-9090-da58d2110922-kube-api-access-smvsb" (OuterVolumeSpecName: "kube-api-access-smvsb") pod "3c01bb95-1d19-435a-9090-da58d2110922" (UID: "3c01bb95-1d19-435a-9090-da58d2110922"). InnerVolumeSpecName "kube-api-access-smvsb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.576711 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c01bb95-1d19-435a-9090-da58d2110922-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "3c01bb95-1d19-435a-9090-da58d2110922" (UID: "3c01bb95-1d19-435a-9090-da58d2110922"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.614476 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qsgtx"] Nov 25 09:06:06 crc kubenswrapper[4687]: W1125 09:06:06.621851 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod31c9ed99_0727_472a_8b48_285fdaaf558c.slice/crio-bf85a45e962602a2aa27e278274c16c716c846a680c1d156f2cf9d2b6201e0aa WatchSource:0}: Error finding container bf85a45e962602a2aa27e278274c16c716c846a680c1d156f2cf9d2b6201e0aa: Status 404 returned error can't find the container with id bf85a45e962602a2aa27e278274c16c716c846a680c1d156f2cf9d2b6201e0aa Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.641667 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.641800 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.643108 4687 patch_prober.go:28] interesting pod/console-f9d7485db-zgglt container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.643160 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-zgglt" podUID="fbe26cf0-9829-4e16-b4c6-24484b1e678a" containerName="console" probeResult="failure" output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.669909 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twk6f\" (UniqueName: \"kubernetes.io/projected/c9e2b2d2-bd11-4098-b71a-d2787d834e9d-kube-api-access-twk6f\") pod \"redhat-operators-fqnbt\" (UID: \"c9e2b2d2-bd11-4098-b71a-d2787d834e9d\") " pod="openshift-marketplace/redhat-operators-fqnbt" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.669953 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9e2b2d2-bd11-4098-b71a-d2787d834e9d-catalog-content\") pod \"redhat-operators-fqnbt\" (UID: \"c9e2b2d2-bd11-4098-b71a-d2787d834e9d\") " pod="openshift-marketplace/redhat-operators-fqnbt" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.670102 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9e2b2d2-bd11-4098-b71a-d2787d834e9d-utilities\") pod \"redhat-operators-fqnbt\" (UID: \"c9e2b2d2-bd11-4098-b71a-d2787d834e9d\") " pod="openshift-marketplace/redhat-operators-fqnbt" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.670247 4687 
reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3c01bb95-1d19-435a-9090-da58d2110922-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.670268 4687 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3c01bb95-1d19-435a-9090-da58d2110922-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.670281 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-smvsb\" (UniqueName: \"kubernetes.io/projected/3c01bb95-1d19-435a-9090-da58d2110922-kube-api-access-smvsb\") on node \"crc\" DevicePath \"\"" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.771952 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twk6f\" (UniqueName: \"kubernetes.io/projected/c9e2b2d2-bd11-4098-b71a-d2787d834e9d-kube-api-access-twk6f\") pod \"redhat-operators-fqnbt\" (UID: \"c9e2b2d2-bd11-4098-b71a-d2787d834e9d\") " pod="openshift-marketplace/redhat-operators-fqnbt" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.772012 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9e2b2d2-bd11-4098-b71a-d2787d834e9d-catalog-content\") pod \"redhat-operators-fqnbt\" (UID: \"c9e2b2d2-bd11-4098-b71a-d2787d834e9d\") " pod="openshift-marketplace/redhat-operators-fqnbt" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.772133 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9e2b2d2-bd11-4098-b71a-d2787d834e9d-utilities\") pod \"redhat-operators-fqnbt\" (UID: \"c9e2b2d2-bd11-4098-b71a-d2787d834e9d\") " pod="openshift-marketplace/redhat-operators-fqnbt" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.773816 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9e2b2d2-bd11-4098-b71a-d2787d834e9d-catalog-content\") pod \"redhat-operators-fqnbt\" (UID: \"c9e2b2d2-bd11-4098-b71a-d2787d834e9d\") " pod="openshift-marketplace/redhat-operators-fqnbt" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.773972 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9e2b2d2-bd11-4098-b71a-d2787d834e9d-utilities\") pod \"redhat-operators-fqnbt\" (UID: \"c9e2b2d2-bd11-4098-b71a-d2787d834e9d\") " pod="openshift-marketplace/redhat-operators-fqnbt" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.791317 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twk6f\" (UniqueName: \"kubernetes.io/projected/c9e2b2d2-bd11-4098-b71a-d2787d834e9d-kube-api-access-twk6f\") pod \"redhat-operators-fqnbt\" (UID: \"c9e2b2d2-bd11-4098-b71a-d2787d834e9d\") " pod="openshift-marketplace/redhat-operators-fqnbt" Nov 25 09:06:06 crc kubenswrapper[4687]: E1125 09:06:06.863483 4687 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod31c9ed99_0727_472a_8b48_285fdaaf558c.slice/crio-944c0f6b7c0757082b60b531c80fef2ca0d4fb9d53af605b456cb5477d50fb99.scope\": RecentStats: unable to find data in memory cache]" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.868054 4687 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fqnbt" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.927444 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bvzw4"] Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.928554 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bvzw4" Nov 25 09:06:06 crc kubenswrapper[4687]: I1125 09:06:06.939655 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bvzw4"] Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.016864 4687 generic.go:334] "Generic (PLEG): container finished" podID="31c9ed99-0727-472a-8b48-285fdaaf558c" containerID="944c0f6b7c0757082b60b531c80fef2ca0d4fb9d53af605b456cb5477d50fb99" exitCode=0 Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.016938 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qsgtx" event={"ID":"31c9ed99-0727-472a-8b48-285fdaaf558c","Type":"ContainerDied","Data":"944c0f6b7c0757082b60b531c80fef2ca0d4fb9d53af605b456cb5477d50fb99"} Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.016968 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qsgtx" event={"ID":"31c9ed99-0727-472a-8b48-285fdaaf558c","Type":"ContainerStarted","Data":"bf85a45e962602a2aa27e278274c16c716c846a680c1d156f2cf9d2b6201e0aa"} Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.019019 4687 generic.go:334] "Generic (PLEG): container finished" podID="3afcc926-b324-4fe9-933c-4918a88619d9" containerID="bfa4956f9204c4927294a3ce4e3080b7167d989da5072e1407bd60a38318266f" exitCode=0 Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.019696 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nx4ps" event={"ID":"3afcc926-b324-4fe9-933c-4918a88619d9","Type":"ContainerDied","Data":"bfa4956f9204c4927294a3ce4e3080b7167d989da5072e1407bd60a38318266f"} Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.019854 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nx4ps" event={"ID":"3afcc926-b324-4fe9-933c-4918a88619d9","Type":"ContainerStarted","Data":"a65cfaec9ca454e62cb265a7584b02d544deee67759f448c2a6b38ba29ccef89"} Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.024704 4687 generic.go:334] "Generic (PLEG): container finished" podID="7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d" containerID="b0983c7cabdab170a00e1b2766ddbfda41b3e8e9b2d5565e1087fb3e6f1e0cf9" exitCode=0 Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.024763 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d","Type":"ContainerDied","Data":"b0983c7cabdab170a00e1b2766ddbfda41b3e8e9b2d5565e1087fb3e6f1e0cf9"} Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.026858 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw" event={"ID":"3c01bb95-1d19-435a-9090-da58d2110922","Type":"ContainerDied","Data":"c6afbb0b44001da091e7cabde2d909027467b5e4218991e9938654c39c287fd7"} Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.026955 4687 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="c6afbb0b44001da091e7cabde2d909027467b5e4218991e9938654c39c287fd7" Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.026961 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw" Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.040945 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-hwpb2" Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.094357 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjv4j\" (UniqueName: \"kubernetes.io/projected/f14d8d9b-a1bd-49c3-b5ae-85712b344568-kube-api-access-pjv4j\") pod \"redhat-operators-bvzw4\" (UID: \"f14d8d9b-a1bd-49c3-b5ae-85712b344568\") " pod="openshift-marketplace/redhat-operators-bvzw4" Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.094830 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f14d8d9b-a1bd-49c3-b5ae-85712b344568-catalog-content\") pod \"redhat-operators-bvzw4\" (UID: \"f14d8d9b-a1bd-49c3-b5ae-85712b344568\") " pod="openshift-marketplace/redhat-operators-bvzw4" Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.095491 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f14d8d9b-a1bd-49c3-b5ae-85712b344568-utilities\") pod \"redhat-operators-bvzw4\" (UID: \"f14d8d9b-a1bd-49c3-b5ae-85712b344568\") " pod="openshift-marketplace/redhat-operators-bvzw4" Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.199313 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f14d8d9b-a1bd-49c3-b5ae-85712b344568-catalog-content\") pod \"redhat-operators-bvzw4\" (UID: \"f14d8d9b-a1bd-49c3-b5ae-85712b344568\") " pod="openshift-marketplace/redhat-operators-bvzw4" Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.199378 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f14d8d9b-a1bd-49c3-b5ae-85712b344568-utilities\") pod \"redhat-operators-bvzw4\" (UID: \"f14d8d9b-a1bd-49c3-b5ae-85712b344568\") " pod="openshift-marketplace/redhat-operators-bvzw4" Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.199472 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjv4j\" (UniqueName: \"kubernetes.io/projected/f14d8d9b-a1bd-49c3-b5ae-85712b344568-kube-api-access-pjv4j\") pod \"redhat-operators-bvzw4\" (UID: \"f14d8d9b-a1bd-49c3-b5ae-85712b344568\") " pod="openshift-marketplace/redhat-operators-bvzw4" Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.200944 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f14d8d9b-a1bd-49c3-b5ae-85712b344568-catalog-content\") pod \"redhat-operators-bvzw4\" (UID: \"f14d8d9b-a1bd-49c3-b5ae-85712b344568\") " pod="openshift-marketplace/redhat-operators-bvzw4" Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.201535 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f14d8d9b-a1bd-49c3-b5ae-85712b344568-utilities\") pod \"redhat-operators-bvzw4\" (UID: 
\"f14d8d9b-a1bd-49c3-b5ae-85712b344568\") " pod="openshift-marketplace/redhat-operators-bvzw4" Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.213585 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fqnbt"] Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.262270 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjv4j\" (UniqueName: \"kubernetes.io/projected/f14d8d9b-a1bd-49c3-b5ae-85712b344568-kube-api-access-pjv4j\") pod \"redhat-operators-bvzw4\" (UID: \"f14d8d9b-a1bd-49c3-b5ae-85712b344568\") " pod="openshift-marketplace/redhat-operators-bvzw4" Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.265562 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bvzw4" Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.540059 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-p2nkd" Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.556662 4687 patch_prober.go:28] interesting pod/router-default-5444994796-p2nkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:06:07 crc kubenswrapper[4687]: [-]has-synced failed: reason withheld Nov 25 09:06:07 crc kubenswrapper[4687]: [+]process-running ok Nov 25 09:06:07 crc kubenswrapper[4687]: healthz check failed Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.556726 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p2nkd" podUID="9aee268c-0a3a-4f5b-8449-c71d027e9d97" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.572019 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk" Nov 25 09:06:07 crc kubenswrapper[4687]: I1125 09:06:07.710181 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bvzw4"] Nov 25 09:06:08 crc kubenswrapper[4687]: I1125 09:06:08.037629 4687 generic.go:334] "Generic (PLEG): container finished" podID="c9e2b2d2-bd11-4098-b71a-d2787d834e9d" containerID="87d51eb94586abd7777db1b1cbbe29942a6fb69a9180642f0a2a5e9e18695f2d" exitCode=0 Nov 25 09:06:08 crc kubenswrapper[4687]: I1125 09:06:08.037742 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fqnbt" event={"ID":"c9e2b2d2-bd11-4098-b71a-d2787d834e9d","Type":"ContainerDied","Data":"87d51eb94586abd7777db1b1cbbe29942a6fb69a9180642f0a2a5e9e18695f2d"} Nov 25 09:06:08 crc kubenswrapper[4687]: I1125 09:06:08.037818 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fqnbt" event={"ID":"c9e2b2d2-bd11-4098-b71a-d2787d834e9d","Type":"ContainerStarted","Data":"b24a2e82cf06fce8e503bdddde61833e8cb660d0138f5b78e491812327ae4863"} Nov 25 09:06:08 crc kubenswrapper[4687]: I1125 09:06:08.039281 4687 generic.go:334] "Generic (PLEG): container finished" podID="f14d8d9b-a1bd-49c3-b5ae-85712b344568" containerID="df37cbd82a2ed7ac6324c49f79986edf5fed7da2ea2788d7db24e60958286ab8" exitCode=0 Nov 25 09:06:08 crc kubenswrapper[4687]: I1125 09:06:08.039350 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-bvzw4" event={"ID":"f14d8d9b-a1bd-49c3-b5ae-85712b344568","Type":"ContainerDied","Data":"df37cbd82a2ed7ac6324c49f79986edf5fed7da2ea2788d7db24e60958286ab8"} Nov 25 09:06:08 crc kubenswrapper[4687]: I1125 09:06:08.039390 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bvzw4" event={"ID":"f14d8d9b-a1bd-49c3-b5ae-85712b344568","Type":"ContainerStarted","Data":"ed0329373030c69054eb8210893b4449ee848c0ee7e7389933ed5b0462d4d851"} Nov 25 09:06:08 crc kubenswrapper[4687]: I1125 09:06:08.389846 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 09:06:08 crc kubenswrapper[4687]: I1125 09:06:08.421554 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d-kubelet-dir\") pod \"7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d\" (UID: \"7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d\") " Nov 25 09:06:08 crc kubenswrapper[4687]: I1125 09:06:08.421693 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d-kube-api-access\") pod \"7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d\" (UID: \"7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d\") " Nov 25 09:06:08 crc kubenswrapper[4687]: I1125 09:06:08.421736 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d" (UID: "7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:06:08 crc kubenswrapper[4687]: I1125 09:06:08.422334 4687 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:06:08 crc kubenswrapper[4687]: I1125 09:06:08.445764 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d" (UID: "7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:06:08 crc kubenswrapper[4687]: I1125 09:06:08.525125 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:06:08 crc kubenswrapper[4687]: I1125 09:06:08.543865 4687 patch_prober.go:28] interesting pod/router-default-5444994796-p2nkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:06:08 crc kubenswrapper[4687]: [-]has-synced failed: reason withheld Nov 25 09:06:08 crc kubenswrapper[4687]: [+]process-running ok Nov 25 09:06:08 crc kubenswrapper[4687]: healthz check failed Nov 25 09:06:08 crc kubenswrapper[4687]: I1125 09:06:08.543940 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p2nkd" podUID="9aee268c-0a3a-4f5b-8449-c71d027e9d97" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:06:09 crc kubenswrapper[4687]: I1125 09:06:09.053562 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d","Type":"ContainerDied","Data":"eb7b6c708132c10f2529fa689ca365f06089f38ada179b8005d8647c27b7974c"} Nov 25 09:06:09 crc kubenswrapper[4687]: I1125 09:06:09.053611 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eb7b6c708132c10f2529fa689ca365f06089f38ada179b8005d8647c27b7974c" Nov 25 09:06:09 crc kubenswrapper[4687]: I1125 09:06:09.053668 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 09:06:09 crc kubenswrapper[4687]: I1125 09:06:09.448364 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 09:06:09 crc kubenswrapper[4687]: E1125 09:06:09.448609 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d" containerName="pruner" Nov 25 09:06:09 crc kubenswrapper[4687]: I1125 09:06:09.448621 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d" containerName="pruner" Nov 25 09:06:09 crc kubenswrapper[4687]: I1125 09:06:09.448714 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="7cd7dccf-a26c-4cba-a5b7-86a1b7b7c29d" containerName="pruner" Nov 25 09:06:09 crc kubenswrapper[4687]: I1125 09:06:09.450123 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:06:09 crc kubenswrapper[4687]: I1125 09:06:09.455241 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 25 09:06:09 crc kubenswrapper[4687]: I1125 09:06:09.456406 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 25 09:06:09 crc kubenswrapper[4687]: I1125 09:06:09.459803 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 09:06:09 crc kubenswrapper[4687]: I1125 09:06:09.538822 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3da4581e-7d38-4a03-bb39-f2a89667eed0-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"3da4581e-7d38-4a03-bb39-f2a89667eed0\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:06:09 crc kubenswrapper[4687]: I1125 09:06:09.538876 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3da4581e-7d38-4a03-bb39-f2a89667eed0-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"3da4581e-7d38-4a03-bb39-f2a89667eed0\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:06:09 crc kubenswrapper[4687]: I1125 09:06:09.540646 4687 patch_prober.go:28] interesting pod/router-default-5444994796-p2nkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:06:09 crc kubenswrapper[4687]: [-]has-synced failed: reason withheld Nov 25 09:06:09 crc kubenswrapper[4687]: [+]process-running ok Nov 25 09:06:09 crc kubenswrapper[4687]: healthz check failed Nov 25 09:06:09 crc kubenswrapper[4687]: I1125 09:06:09.540690 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p2nkd" podUID="9aee268c-0a3a-4f5b-8449-c71d027e9d97" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:06:09 crc kubenswrapper[4687]: I1125 09:06:09.640226 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3da4581e-7d38-4a03-bb39-f2a89667eed0-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"3da4581e-7d38-4a03-bb39-f2a89667eed0\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:06:09 crc kubenswrapper[4687]: I1125 09:06:09.640336 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3da4581e-7d38-4a03-bb39-f2a89667eed0-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"3da4581e-7d38-4a03-bb39-f2a89667eed0\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:06:09 crc kubenswrapper[4687]: I1125 09:06:09.640653 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3da4581e-7d38-4a03-bb39-f2a89667eed0-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"3da4581e-7d38-4a03-bb39-f2a89667eed0\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:06:09 crc kubenswrapper[4687]: I1125 09:06:09.673468 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3da4581e-7d38-4a03-bb39-f2a89667eed0-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"3da4581e-7d38-4a03-bb39-f2a89667eed0\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:06:09 crc kubenswrapper[4687]: I1125 09:06:09.776631 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:06:10 crc kubenswrapper[4687]: I1125 09:06:10.042827 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 09:06:10 crc kubenswrapper[4687]: W1125 09:06:10.087734 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod3da4581e_7d38_4a03_bb39_f2a89667eed0.slice/crio-b8a599257f1e8ff68ec63be5724f5efeca08f68d0848861c2e9afc70f76daa6e WatchSource:0}: Error finding container b8a599257f1e8ff68ec63be5724f5efeca08f68d0848861c2e9afc70f76daa6e: Status 404 returned error can't find the container with id b8a599257f1e8ff68ec63be5724f5efeca08f68d0848861c2e9afc70f76daa6e Nov 25 09:06:10 crc kubenswrapper[4687]: I1125 09:06:10.541992 4687 patch_prober.go:28] interesting pod/router-default-5444994796-p2nkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:06:10 crc kubenswrapper[4687]: [-]has-synced failed: reason withheld Nov 25 09:06:10 crc kubenswrapper[4687]: [+]process-running ok Nov 25 09:06:10 crc kubenswrapper[4687]: healthz check failed Nov 25 09:06:10 crc kubenswrapper[4687]: I1125 09:06:10.542374 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p2nkd" podUID="9aee268c-0a3a-4f5b-8449-c71d027e9d97" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:06:11 crc kubenswrapper[4687]: I1125 09:06:11.090018 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3da4581e-7d38-4a03-bb39-f2a89667eed0","Type":"ContainerStarted","Data":"286abbec3a5eb28deea998b58f97e1cf90b55d8a14976677b7ddc098bc2f4bed"} Nov 25 09:06:11 crc kubenswrapper[4687]: I1125 09:06:11.090065 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3da4581e-7d38-4a03-bb39-f2a89667eed0","Type":"ContainerStarted","Data":"b8a599257f1e8ff68ec63be5724f5efeca08f68d0848861c2e9afc70f76daa6e"} Nov 25 09:06:11 crc kubenswrapper[4687]: I1125 09:06:11.103373 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=2.10335345 podStartE2EDuration="2.10335345s" podCreationTimestamp="2025-11-25 09:06:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:11.101220563 +0000 UTC m=+166.154860281" watchObservedRunningTime="2025-11-25 09:06:11.10335345 +0000 UTC m=+166.156993178" Nov 25 09:06:11 crc kubenswrapper[4687]: I1125 09:06:11.544361 4687 patch_prober.go:28] interesting pod/router-default-5444994796-p2nkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:06:11 crc kubenswrapper[4687]: [-]has-synced failed: reason 
withheld Nov 25 09:06:11 crc kubenswrapper[4687]: [+]process-running ok Nov 25 09:06:11 crc kubenswrapper[4687]: healthz check failed Nov 25 09:06:11 crc kubenswrapper[4687]: I1125 09:06:11.544421 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p2nkd" podUID="9aee268c-0a3a-4f5b-8449-c71d027e9d97" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:06:12 crc kubenswrapper[4687]: I1125 09:06:12.122170 4687 generic.go:334] "Generic (PLEG): container finished" podID="3da4581e-7d38-4a03-bb39-f2a89667eed0" containerID="286abbec3a5eb28deea998b58f97e1cf90b55d8a14976677b7ddc098bc2f4bed" exitCode=0 Nov 25 09:06:12 crc kubenswrapper[4687]: I1125 09:06:12.122326 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3da4581e-7d38-4a03-bb39-f2a89667eed0","Type":"ContainerDied","Data":"286abbec3a5eb28deea998b58f97e1cf90b55d8a14976677b7ddc098bc2f4bed"} Nov 25 09:06:12 crc kubenswrapper[4687]: I1125 09:06:12.410473 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs\") pod \"network-metrics-daemon-cscrb\" (UID: \"0433643a-5ed9-485b-a788-51de4a92f461\") " pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:06:12 crc kubenswrapper[4687]: I1125 09:06:12.420562 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0433643a-5ed9-485b-a788-51de4a92f461-metrics-certs\") pod \"network-metrics-daemon-cscrb\" (UID: \"0433643a-5ed9-485b-a788-51de4a92f461\") " pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:06:12 crc kubenswrapper[4687]: I1125 09:06:12.540496 4687 patch_prober.go:28] interesting pod/router-default-5444994796-p2nkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:06:12 crc kubenswrapper[4687]: [-]has-synced failed: reason withheld Nov 25 09:06:12 crc kubenswrapper[4687]: [+]process-running ok Nov 25 09:06:12 crc kubenswrapper[4687]: healthz check failed Nov 25 09:06:12 crc kubenswrapper[4687]: I1125 09:06:12.540569 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p2nkd" podUID="9aee268c-0a3a-4f5b-8449-c71d027e9d97" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:06:12 crc kubenswrapper[4687]: I1125 09:06:12.584717 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-cscrb" Nov 25 09:06:12 crc kubenswrapper[4687]: I1125 09:06:12.658472 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-cpbmq" Nov 25 09:06:13 crc kubenswrapper[4687]: I1125 09:06:13.541672 4687 patch_prober.go:28] interesting pod/router-default-5444994796-p2nkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:06:13 crc kubenswrapper[4687]: [-]has-synced failed: reason withheld Nov 25 09:06:13 crc kubenswrapper[4687]: [+]process-running ok Nov 25 09:06:13 crc kubenswrapper[4687]: healthz check failed Nov 25 09:06:13 crc kubenswrapper[4687]: I1125 09:06:13.542250 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p2nkd" podUID="9aee268c-0a3a-4f5b-8449-c71d027e9d97" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:06:14 crc kubenswrapper[4687]: I1125 09:06:14.540914 4687 patch_prober.go:28] interesting pod/router-default-5444994796-p2nkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:06:14 crc kubenswrapper[4687]: [-]has-synced failed: reason withheld Nov 25 09:06:14 crc kubenswrapper[4687]: [+]process-running ok Nov 25 09:06:14 crc kubenswrapper[4687]: healthz check failed Nov 25 09:06:14 crc kubenswrapper[4687]: I1125 09:06:14.540971 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p2nkd" podUID="9aee268c-0a3a-4f5b-8449-c71d027e9d97" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:06:15 crc kubenswrapper[4687]: I1125 09:06:15.613354 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-p2nkd" Nov 25 09:06:15 crc kubenswrapper[4687]: I1125 09:06:15.618340 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-p2nkd" Nov 25 09:06:16 crc kubenswrapper[4687]: I1125 09:06:16.509708 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-xmxvb" Nov 25 09:06:16 crc kubenswrapper[4687]: I1125 09:06:16.545197 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:06:16 crc kubenswrapper[4687]: I1125 09:06:16.569960 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3da4581e-7d38-4a03-bb39-f2a89667eed0-kubelet-dir\") pod \"3da4581e-7d38-4a03-bb39-f2a89667eed0\" (UID: \"3da4581e-7d38-4a03-bb39-f2a89667eed0\") " Nov 25 09:06:16 crc kubenswrapper[4687]: I1125 09:06:16.570134 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3da4581e-7d38-4a03-bb39-f2a89667eed0-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "3da4581e-7d38-4a03-bb39-f2a89667eed0" (UID: "3da4581e-7d38-4a03-bb39-f2a89667eed0"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:06:16 crc kubenswrapper[4687]: I1125 09:06:16.570257 4687 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3da4581e-7d38-4a03-bb39-f2a89667eed0-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:06:16 crc kubenswrapper[4687]: I1125 09:06:16.642491 4687 patch_prober.go:28] interesting pod/console-f9d7485db-zgglt container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Nov 25 09:06:16 crc kubenswrapper[4687]: I1125 09:06:16.642581 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-zgglt" podUID="fbe26cf0-9829-4e16-b4c6-24484b1e678a" containerName="console" probeResult="failure" output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused" Nov 25 09:06:16 crc kubenswrapper[4687]: I1125 09:06:16.670936 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3da4581e-7d38-4a03-bb39-f2a89667eed0-kube-api-access\") pod \"3da4581e-7d38-4a03-bb39-f2a89667eed0\" (UID: \"3da4581e-7d38-4a03-bb39-f2a89667eed0\") " Nov 25 09:06:16 crc kubenswrapper[4687]: I1125 09:06:16.676544 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3da4581e-7d38-4a03-bb39-f2a89667eed0-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "3da4581e-7d38-4a03-bb39-f2a89667eed0" (UID: "3da4581e-7d38-4a03-bb39-f2a89667eed0"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:06:16 crc kubenswrapper[4687]: I1125 09:06:16.772468 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3da4581e-7d38-4a03-bb39-f2a89667eed0-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:06:17 crc kubenswrapper[4687]: I1125 09:06:17.162727 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3da4581e-7d38-4a03-bb39-f2a89667eed0","Type":"ContainerDied","Data":"b8a599257f1e8ff68ec63be5724f5efeca08f68d0848861c2e9afc70f76daa6e"} Nov 25 09:06:17 crc kubenswrapper[4687]: I1125 09:06:17.162765 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b8a599257f1e8ff68ec63be5724f5efeca08f68d0848861c2e9afc70f76daa6e" Nov 25 09:06:17 crc kubenswrapper[4687]: I1125 09:06:17.162774 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:06:17 crc kubenswrapper[4687]: I1125 09:06:17.440111 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-cscrb"] Nov 25 09:06:17 crc kubenswrapper[4687]: W1125 09:06:17.450355 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0433643a_5ed9_485b_a788_51de4a92f461.slice/crio-4867429346ee2763c0d392205463d08e24e2851dc4bf44d9c0dbb3479e1129cf WatchSource:0}: Error finding container 4867429346ee2763c0d392205463d08e24e2851dc4bf44d9c0dbb3479e1129cf: Status 404 returned error can't find the container with id 4867429346ee2763c0d392205463d08e24e2851dc4bf44d9c0dbb3479e1129cf Nov 25 09:06:18 crc kubenswrapper[4687]: I1125 09:06:18.180611 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-cscrb" event={"ID":"0433643a-5ed9-485b-a788-51de4a92f461","Type":"ContainerStarted","Data":"c90d967a683fcdcb542099dd4bc243ee13dddd894173c5f02e1c5518d03178f0"} Nov 25 09:06:18 crc kubenswrapper[4687]: I1125 09:06:18.181104 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-cscrb" event={"ID":"0433643a-5ed9-485b-a788-51de4a92f461","Type":"ContainerStarted","Data":"4867429346ee2763c0d392205463d08e24e2851dc4bf44d9c0dbb3479e1129cf"} Nov 25 09:06:23 crc kubenswrapper[4687]: I1125 09:06:23.844588 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:06:23 crc kubenswrapper[4687]: I1125 09:06:23.845402 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:06:25 crc kubenswrapper[4687]: I1125 09:06:25.234001 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:06:26 crc kubenswrapper[4687]: I1125 09:06:26.656175 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:06:26 crc kubenswrapper[4687]: I1125 09:06:26.662109 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:06:33 crc kubenswrapper[4687]: I1125 09:06:33.153628 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:06:36 crc kubenswrapper[4687]: E1125 09:06:36.098933 4687 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 25 09:06:36 crc kubenswrapper[4687]: E1125 09:06:36.099476 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs 
--catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s8bnj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-49wvg_openshift-marketplace(614abb7b-e915-4b2a-9f57-bbd8ec4d2433): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 09:06:36 crc kubenswrapper[4687]: E1125 09:06:36.100746 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-49wvg" podUID="614abb7b-e915-4b2a-9f57-bbd8ec4d2433" Nov 25 09:06:37 crc kubenswrapper[4687]: I1125 09:06:37.278588 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ztsnq" Nov 25 09:06:38 crc kubenswrapper[4687]: E1125 09:06:38.347884 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-49wvg" podUID="614abb7b-e915-4b2a-9f57-bbd8ec4d2433" Nov 25 09:06:38 crc kubenswrapper[4687]: E1125 09:06:38.608060 4687 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 25 09:06:38 crc kubenswrapper[4687]: E1125 09:06:38.608299 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sl985,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-59vsx_openshift-marketplace(d8cf4b36-d7aa-4d9a-bc65-a893435ca244): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 09:06:38 crc kubenswrapper[4687]: E1125 09:06:38.609549 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-59vsx" podUID="d8cf4b36-d7aa-4d9a-bc65-a893435ca244" Nov 25 09:06:38 crc kubenswrapper[4687]: E1125 09:06:38.720432 4687 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 25 09:06:38 crc kubenswrapper[4687]: E1125 09:06:38.720976 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pjv4j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-bvzw4_openshift-marketplace(f14d8d9b-a1bd-49c3-b5ae-85712b344568): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 09:06:38 crc kubenswrapper[4687]: E1125 09:06:38.722192 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-bvzw4" podUID="f14d8d9b-a1bd-49c3-b5ae-85712b344568" Nov 25 09:06:39 crc kubenswrapper[4687]: E1125 09:06:39.575006 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-bvzw4" podUID="f14d8d9b-a1bd-49c3-b5ae-85712b344568" Nov 25 09:06:39 crc kubenswrapper[4687]: E1125 09:06:39.575239 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-59vsx" podUID="d8cf4b36-d7aa-4d9a-bc65-a893435ca244" Nov 25 09:06:39 crc kubenswrapper[4687]: E1125 09:06:39.679321 4687 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 25 09:06:39 crc kubenswrapper[4687]: E1125 09:06:39.679704 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-888bv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-gpptd_openshift-marketplace(ccde1639-05be-47c5-93b8-c1eb83167814): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 09:06:39 crc kubenswrapper[4687]: E1125 09:06:39.681908 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-gpptd" podUID="ccde1639-05be-47c5-93b8-c1eb83167814" Nov 25 09:06:40 crc kubenswrapper[4687]: E1125 09:06:40.807892 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-gpptd" podUID="ccde1639-05be-47c5-93b8-c1eb83167814" Nov 25 09:06:41 crc kubenswrapper[4687]: E1125 09:06:41.019424 4687 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 25 09:06:41 crc kubenswrapper[4687]: E1125 09:06:41.019629 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8bx6j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-qsgtx_openshift-marketplace(31c9ed99-0727-472a-8b48-285fdaaf558c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 09:06:41 crc kubenswrapper[4687]: E1125 09:06:41.020789 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-qsgtx" podUID="31c9ed99-0727-472a-8b48-285fdaaf558c" Nov 25 09:06:41 crc kubenswrapper[4687]: E1125 09:06:41.245639 4687 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 25 09:06:41 crc kubenswrapper[4687]: E1125 09:06:41.245845 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vjttc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-nx4ps_openshift-marketplace(3afcc926-b324-4fe9-933c-4918a88619d9): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 09:06:41 crc kubenswrapper[4687]: E1125 09:06:41.247052 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-nx4ps" podUID="3afcc926-b324-4fe9-933c-4918a88619d9" Nov 25 09:06:41 crc kubenswrapper[4687]: E1125 09:06:41.330227 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-qsgtx" podUID="31c9ed99-0727-472a-8b48-285fdaaf558c" Nov 25 09:06:41 crc kubenswrapper[4687]: E1125 09:06:41.330302 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-nx4ps" podUID="3afcc926-b324-4fe9-933c-4918a88619d9" Nov 25 09:06:42 crc kubenswrapper[4687]: I1125 09:06:42.334693 4687 generic.go:334] "Generic (PLEG): container finished" podID="87d111a8-6128-4b3c-a3de-2143ab121856" containerID="4e085e50ca3e56b0a3ae1b791c50f73e5f03857b4d916d416d4f2659b5ace4e3" exitCode=0 Nov 25 09:06:42 crc kubenswrapper[4687]: I1125 09:06:42.334771 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w8k2g" event={"ID":"87d111a8-6128-4b3c-a3de-2143ab121856","Type":"ContainerDied","Data":"4e085e50ca3e56b0a3ae1b791c50f73e5f03857b4d916d416d4f2659b5ace4e3"} Nov 25 09:06:42 crc kubenswrapper[4687]: I1125 09:06:42.343367 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-cscrb" 
event={"ID":"0433643a-5ed9-485b-a788-51de4a92f461","Type":"ContainerStarted","Data":"722085b2999c6d346285c4ed490af1b4fb52cd311f5ae3b4d0c4acebd1f090ab"} Nov 25 09:06:42 crc kubenswrapper[4687]: I1125 09:06:42.345333 4687 generic.go:334] "Generic (PLEG): container finished" podID="c9e2b2d2-bd11-4098-b71a-d2787d834e9d" containerID="ef97aa67437478c5404595ab9730d508baab5541993b6b9af8792c7dbbb24e67" exitCode=0 Nov 25 09:06:42 crc kubenswrapper[4687]: I1125 09:06:42.345356 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fqnbt" event={"ID":"c9e2b2d2-bd11-4098-b71a-d2787d834e9d","Type":"ContainerDied","Data":"ef97aa67437478c5404595ab9730d508baab5541993b6b9af8792c7dbbb24e67"} Nov 25 09:06:42 crc kubenswrapper[4687]: I1125 09:06:42.393394 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-cscrb" podStartSLOduration=173.393378146 podStartE2EDuration="2m53.393378146s" podCreationTimestamp="2025-11-25 09:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:42.374190857 +0000 UTC m=+197.427830575" watchObservedRunningTime="2025-11-25 09:06:42.393378146 +0000 UTC m=+197.447017864" Nov 25 09:06:43 crc kubenswrapper[4687]: I1125 09:06:43.354891 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fqnbt" event={"ID":"c9e2b2d2-bd11-4098-b71a-d2787d834e9d","Type":"ContainerStarted","Data":"94cc138c70abb9f2a2a49f202efafe4e7e86d67c776a1a891a33272db324268f"} Nov 25 09:06:43 crc kubenswrapper[4687]: I1125 09:06:43.359038 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w8k2g" event={"ID":"87d111a8-6128-4b3c-a3de-2143ab121856","Type":"ContainerStarted","Data":"523ce6880f708adee6c45b11f9a9f3ce19f2ccf6f299f634c6c8aaaaf0a15715"} Nov 25 09:06:43 crc kubenswrapper[4687]: I1125 09:06:43.376926 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fqnbt" podStartSLOduration=2.404888728 podStartE2EDuration="37.37690246s" podCreationTimestamp="2025-11-25 09:06:06 +0000 UTC" firstStartedPulling="2025-11-25 09:06:08.03886381 +0000 UTC m=+163.092503528" lastFinishedPulling="2025-11-25 09:06:43.010877532 +0000 UTC m=+198.064517260" observedRunningTime="2025-11-25 09:06:43.373236999 +0000 UTC m=+198.426876747" watchObservedRunningTime="2025-11-25 09:06:43.37690246 +0000 UTC m=+198.430542178" Nov 25 09:06:44 crc kubenswrapper[4687]: I1125 09:06:44.100401 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-w8k2g" Nov 25 09:06:44 crc kubenswrapper[4687]: I1125 09:06:44.100741 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-w8k2g" Nov 25 09:06:45 crc kubenswrapper[4687]: I1125 09:06:45.243452 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-w8k2g" podUID="87d111a8-6128-4b3c-a3de-2143ab121856" containerName="registry-server" probeResult="failure" output=< Nov 25 09:06:45 crc kubenswrapper[4687]: timeout: failed to connect service ":50051" within 1s Nov 25 09:06:45 crc kubenswrapper[4687]: > Nov 25 09:06:46 crc kubenswrapper[4687]: I1125 09:06:46.868211 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-operators-fqnbt" Nov 25 09:06:46 crc kubenswrapper[4687]: I1125 09:06:46.868540 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fqnbt" Nov 25 09:06:47 crc kubenswrapper[4687]: I1125 09:06:47.909728 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fqnbt" podUID="c9e2b2d2-bd11-4098-b71a-d2787d834e9d" containerName="registry-server" probeResult="failure" output=< Nov 25 09:06:47 crc kubenswrapper[4687]: timeout: failed to connect service ":50051" within 1s Nov 25 09:06:47 crc kubenswrapper[4687]: > Nov 25 09:06:48 crc kubenswrapper[4687]: I1125 09:06:48.761766 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-w8k2g" podStartSLOduration=7.734730154 podStartE2EDuration="45.761749873s" podCreationTimestamp="2025-11-25 09:06:03 +0000 UTC" firstStartedPulling="2025-11-25 09:06:04.973829386 +0000 UTC m=+160.027469104" lastFinishedPulling="2025-11-25 09:06:43.000849085 +0000 UTC m=+198.054488823" observedRunningTime="2025-11-25 09:06:43.391695318 +0000 UTC m=+198.445335046" watchObservedRunningTime="2025-11-25 09:06:48.761749873 +0000 UTC m=+203.815389591" Nov 25 09:06:50 crc kubenswrapper[4687]: I1125 09:06:50.191363 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 25 09:06:50 crc kubenswrapper[4687]: E1125 09:06:50.191941 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3da4581e-7d38-4a03-bb39-f2a89667eed0" containerName="pruner" Nov 25 09:06:50 crc kubenswrapper[4687]: I1125 09:06:50.191957 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="3da4581e-7d38-4a03-bb39-f2a89667eed0" containerName="pruner" Nov 25 09:06:50 crc kubenswrapper[4687]: I1125 09:06:50.192092 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="3da4581e-7d38-4a03-bb39-f2a89667eed0" containerName="pruner" Nov 25 09:06:50 crc kubenswrapper[4687]: I1125 09:06:50.192635 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 09:06:50 crc kubenswrapper[4687]: I1125 09:06:50.194492 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 25 09:06:50 crc kubenswrapper[4687]: I1125 09:06:50.194870 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 25 09:06:50 crc kubenswrapper[4687]: I1125 09:06:50.206059 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 25 09:06:50 crc kubenswrapper[4687]: I1125 09:06:50.332325 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e900ac0d-0b2d-4260-ba94-e2182072fa53-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e900ac0d-0b2d-4260-ba94-e2182072fa53\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 09:06:50 crc kubenswrapper[4687]: I1125 09:06:50.332480 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e900ac0d-0b2d-4260-ba94-e2182072fa53-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e900ac0d-0b2d-4260-ba94-e2182072fa53\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 09:06:50 crc kubenswrapper[4687]: I1125 09:06:50.401872 4687 generic.go:334] "Generic (PLEG): container finished" podID="614abb7b-e915-4b2a-9f57-bbd8ec4d2433" containerID="40cff460501fcbf9c6fe6f9a45db64400e08bde1507764ed45c26ff05e579e28" exitCode=0 Nov 25 09:06:50 crc kubenswrapper[4687]: I1125 09:06:50.401913 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-49wvg" event={"ID":"614abb7b-e915-4b2a-9f57-bbd8ec4d2433","Type":"ContainerDied","Data":"40cff460501fcbf9c6fe6f9a45db64400e08bde1507764ed45c26ff05e579e28"} Nov 25 09:06:50 crc kubenswrapper[4687]: I1125 09:06:50.434325 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e900ac0d-0b2d-4260-ba94-e2182072fa53-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e900ac0d-0b2d-4260-ba94-e2182072fa53\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 09:06:50 crc kubenswrapper[4687]: I1125 09:06:50.434410 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e900ac0d-0b2d-4260-ba94-e2182072fa53-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e900ac0d-0b2d-4260-ba94-e2182072fa53\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 09:06:50 crc kubenswrapper[4687]: I1125 09:06:50.434526 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e900ac0d-0b2d-4260-ba94-e2182072fa53-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e900ac0d-0b2d-4260-ba94-e2182072fa53\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 09:06:50 crc kubenswrapper[4687]: I1125 09:06:50.464182 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e900ac0d-0b2d-4260-ba94-e2182072fa53-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e900ac0d-0b2d-4260-ba94-e2182072fa53\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 
09:06:50 crc kubenswrapper[4687]: I1125 09:06:50.517851 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 09:06:50 crc kubenswrapper[4687]: I1125 09:06:50.705105 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 25 09:06:51 crc kubenswrapper[4687]: I1125 09:06:51.408396 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"e900ac0d-0b2d-4260-ba94-e2182072fa53","Type":"ContainerStarted","Data":"33fd3a0852be460da619086f2682d739b69913fcdc7aff09077137afd4ff9f7b"} Nov 25 09:06:51 crc kubenswrapper[4687]: I1125 09:06:51.408838 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"e900ac0d-0b2d-4260-ba94-e2182072fa53","Type":"ContainerStarted","Data":"72123e5b09590fa01ce7ebc3bdd3e8b2c713d918521d349d02646bbdae6a9d8b"} Nov 25 09:06:51 crc kubenswrapper[4687]: I1125 09:06:51.420136 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=1.420116204 podStartE2EDuration="1.420116204s" podCreationTimestamp="2025-11-25 09:06:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:51.41924321 +0000 UTC m=+206.472882948" watchObservedRunningTime="2025-11-25 09:06:51.420116204 +0000 UTC m=+206.473755922" Nov 25 09:06:52 crc kubenswrapper[4687]: I1125 09:06:52.414996 4687 generic.go:334] "Generic (PLEG): container finished" podID="e900ac0d-0b2d-4260-ba94-e2182072fa53" containerID="33fd3a0852be460da619086f2682d739b69913fcdc7aff09077137afd4ff9f7b" exitCode=0 Nov 25 09:06:52 crc kubenswrapper[4687]: I1125 09:06:52.415084 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"e900ac0d-0b2d-4260-ba94-e2182072fa53","Type":"ContainerDied","Data":"33fd3a0852be460da619086f2682d739b69913fcdc7aff09077137afd4ff9f7b"} Nov 25 09:06:53 crc kubenswrapper[4687]: I1125 09:06:53.423074 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-49wvg" event={"ID":"614abb7b-e915-4b2a-9f57-bbd8ec4d2433","Type":"ContainerStarted","Data":"078bc5e927f35f1a98f60a7fe9a0fb2184335b44babf71a0618ea5251a88f766"} Nov 25 09:06:53 crc kubenswrapper[4687]: I1125 09:06:53.446489 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-49wvg" podStartSLOduration=3.203418953 podStartE2EDuration="50.4464732s" podCreationTimestamp="2025-11-25 09:06:03 +0000 UTC" firstStartedPulling="2025-11-25 09:06:04.997839442 +0000 UTC m=+160.051479160" lastFinishedPulling="2025-11-25 09:06:52.240893679 +0000 UTC m=+207.294533407" observedRunningTime="2025-11-25 09:06:53.445728469 +0000 UTC m=+208.499368197" watchObservedRunningTime="2025-11-25 09:06:53.4464732 +0000 UTC m=+208.500112918" Nov 25 09:06:53 crc kubenswrapper[4687]: I1125 09:06:53.646385 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 09:06:53 crc kubenswrapper[4687]: I1125 09:06:53.775625 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e900ac0d-0b2d-4260-ba94-e2182072fa53-kubelet-dir\") pod \"e900ac0d-0b2d-4260-ba94-e2182072fa53\" (UID: \"e900ac0d-0b2d-4260-ba94-e2182072fa53\") " Nov 25 09:06:53 crc kubenswrapper[4687]: I1125 09:06:53.775747 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e900ac0d-0b2d-4260-ba94-e2182072fa53-kube-api-access\") pod \"e900ac0d-0b2d-4260-ba94-e2182072fa53\" (UID: \"e900ac0d-0b2d-4260-ba94-e2182072fa53\") " Nov 25 09:06:53 crc kubenswrapper[4687]: I1125 09:06:53.775770 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e900ac0d-0b2d-4260-ba94-e2182072fa53-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "e900ac0d-0b2d-4260-ba94-e2182072fa53" (UID: "e900ac0d-0b2d-4260-ba94-e2182072fa53"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:06:53 crc kubenswrapper[4687]: I1125 09:06:53.783523 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e900ac0d-0b2d-4260-ba94-e2182072fa53-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e900ac0d-0b2d-4260-ba94-e2182072fa53" (UID: "e900ac0d-0b2d-4260-ba94-e2182072fa53"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:06:53 crc kubenswrapper[4687]: I1125 09:06:53.844249 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:06:53 crc kubenswrapper[4687]: I1125 09:06:53.844307 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:06:53 crc kubenswrapper[4687]: I1125 09:06:53.844351 4687 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:06:53 crc kubenswrapper[4687]: I1125 09:06:53.844942 4687 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174"} pod="openshift-machine-config-operator/machine-config-daemon-vcqct" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:06:53 crc kubenswrapper[4687]: I1125 09:06:53.845047 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" containerID="cri-o://207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174" gracePeriod=600 Nov 25 09:06:53 crc kubenswrapper[4687]: I1125 09:06:53.877172 4687 reconciler_common.go:293] "Volume detached 
for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e900ac0d-0b2d-4260-ba94-e2182072fa53-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:06:53 crc kubenswrapper[4687]: I1125 09:06:53.877207 4687 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e900ac0d-0b2d-4260-ba94-e2182072fa53-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:06:54 crc kubenswrapper[4687]: I1125 09:06:54.267842 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-w8k2g" Nov 25 09:06:54 crc kubenswrapper[4687]: I1125 09:06:54.279108 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-49wvg" Nov 25 09:06:54 crc kubenswrapper[4687]: I1125 09:06:54.279151 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-49wvg" Nov 25 09:06:54 crc kubenswrapper[4687]: I1125 09:06:54.330433 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-w8k2g" Nov 25 09:06:54 crc kubenswrapper[4687]: I1125 09:06:54.428941 4687 generic.go:334] "Generic (PLEG): container finished" podID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerID="207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174" exitCode=0 Nov 25 09:06:54 crc kubenswrapper[4687]: I1125 09:06:54.429033 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerDied","Data":"207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174"} Nov 25 09:06:54 crc kubenswrapper[4687]: I1125 09:06:54.431196 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"e900ac0d-0b2d-4260-ba94-e2182072fa53","Type":"ContainerDied","Data":"72123e5b09590fa01ce7ebc3bdd3e8b2c713d918521d349d02646bbdae6a9d8b"} Nov 25 09:06:54 crc kubenswrapper[4687]: I1125 09:06:54.431240 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="72123e5b09590fa01ce7ebc3bdd3e8b2c713d918521d349d02646bbdae6a9d8b" Nov 25 09:06:54 crc kubenswrapper[4687]: I1125 09:06:54.431241 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.172653 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-w8k2g"] Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.320853 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-49wvg" podUID="614abb7b-e915-4b2a-9f57-bbd8ec4d2433" containerName="registry-server" probeResult="failure" output=< Nov 25 09:06:55 crc kubenswrapper[4687]: timeout: failed to connect service ":50051" within 1s Nov 25 09:06:55 crc kubenswrapper[4687]: > Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.437303 4687 generic.go:334] "Generic (PLEG): container finished" podID="f14d8d9b-a1bd-49c3-b5ae-85712b344568" containerID="aa975da8662c4aa4913ce96a447f2e0f59d8f75a88fcfd3afa0dd4da345ba7ba" exitCode=0 Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.437361 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bvzw4" event={"ID":"f14d8d9b-a1bd-49c3-b5ae-85712b344568","Type":"ContainerDied","Data":"aa975da8662c4aa4913ce96a447f2e0f59d8f75a88fcfd3afa0dd4da345ba7ba"} Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.441705 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerStarted","Data":"c575149652dbebb18c6b942edd7015230ac6861490511f4bb26940eebb3f97bc"} Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.442775 4687 generic.go:334] "Generic (PLEG): container finished" podID="d8cf4b36-d7aa-4d9a-bc65-a893435ca244" containerID="2e180633bf01df9fc83530f0fb8c3f68f1976bb1fff9c6e919aa64dcfd3a7fb3" exitCode=0 Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.442928 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59vsx" event={"ID":"d8cf4b36-d7aa-4d9a-bc65-a893435ca244","Type":"ContainerDied","Data":"2e180633bf01df9fc83530f0fb8c3f68f1976bb1fff9c6e919aa64dcfd3a7fb3"} Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.443115 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-w8k2g" podUID="87d111a8-6128-4b3c-a3de-2143ab121856" containerName="registry-server" containerID="cri-o://523ce6880f708adee6c45b11f9a9f3ce19f2ccf6f299f634c6c8aaaaf0a15715" gracePeriod=2 Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.789906 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 25 09:06:55 crc kubenswrapper[4687]: E1125 09:06:55.790706 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e900ac0d-0b2d-4260-ba94-e2182072fa53" containerName="pruner" Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.790725 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="e900ac0d-0b2d-4260-ba94-e2182072fa53" containerName="pruner" Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.790966 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="e900ac0d-0b2d-4260-ba94-e2182072fa53" containerName="pruner" Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.792657 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.802758 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.802980 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.808952 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w8k2g" Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.808990 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.901702 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87d111a8-6128-4b3c-a3de-2143ab121856-catalog-content\") pod \"87d111a8-6128-4b3c-a3de-2143ab121856\" (UID: \"87d111a8-6128-4b3c-a3de-2143ab121856\") " Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.901833 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87d111a8-6128-4b3c-a3de-2143ab121856-utilities\") pod \"87d111a8-6128-4b3c-a3de-2143ab121856\" (UID: \"87d111a8-6128-4b3c-a3de-2143ab121856\") " Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.901874 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7rsq\" (UniqueName: \"kubernetes.io/projected/87d111a8-6128-4b3c-a3de-2143ab121856-kube-api-access-h7rsq\") pod \"87d111a8-6128-4b3c-a3de-2143ab121856\" (UID: \"87d111a8-6128-4b3c-a3de-2143ab121856\") " Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.902037 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b9ef2435-1dc0-412c-b4fd-fcfd912180ec-kubelet-dir\") pod \"installer-9-crc\" (UID: \"b9ef2435-1dc0-412c-b4fd-fcfd912180ec\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.902089 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b9ef2435-1dc0-412c-b4fd-fcfd912180ec-kube-api-access\") pod \"installer-9-crc\" (UID: \"b9ef2435-1dc0-412c-b4fd-fcfd912180ec\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.902132 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/b9ef2435-1dc0-412c-b4fd-fcfd912180ec-var-lock\") pod \"installer-9-crc\" (UID: \"b9ef2435-1dc0-412c-b4fd-fcfd912180ec\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.903404 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87d111a8-6128-4b3c-a3de-2143ab121856-utilities" (OuterVolumeSpecName: "utilities") pod "87d111a8-6128-4b3c-a3de-2143ab121856" (UID: "87d111a8-6128-4b3c-a3de-2143ab121856"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.908229 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87d111a8-6128-4b3c-a3de-2143ab121856-kube-api-access-h7rsq" (OuterVolumeSpecName: "kube-api-access-h7rsq") pod "87d111a8-6128-4b3c-a3de-2143ab121856" (UID: "87d111a8-6128-4b3c-a3de-2143ab121856"). InnerVolumeSpecName "kube-api-access-h7rsq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:06:55 crc kubenswrapper[4687]: I1125 09:06:55.968171 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87d111a8-6128-4b3c-a3de-2143ab121856-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "87d111a8-6128-4b3c-a3de-2143ab121856" (UID: "87d111a8-6128-4b3c-a3de-2143ab121856"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.002917 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b9ef2435-1dc0-412c-b4fd-fcfd912180ec-kube-api-access\") pod \"installer-9-crc\" (UID: \"b9ef2435-1dc0-412c-b4fd-fcfd912180ec\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.003004 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/b9ef2435-1dc0-412c-b4fd-fcfd912180ec-var-lock\") pod \"installer-9-crc\" (UID: \"b9ef2435-1dc0-412c-b4fd-fcfd912180ec\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.003061 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b9ef2435-1dc0-412c-b4fd-fcfd912180ec-kubelet-dir\") pod \"installer-9-crc\" (UID: \"b9ef2435-1dc0-412c-b4fd-fcfd912180ec\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.003101 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87d111a8-6128-4b3c-a3de-2143ab121856-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.003112 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87d111a8-6128-4b3c-a3de-2143ab121856-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.003124 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7rsq\" (UniqueName: \"kubernetes.io/projected/87d111a8-6128-4b3c-a3de-2143ab121856-kube-api-access-h7rsq\") on node \"crc\" DevicePath \"\"" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.003174 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b9ef2435-1dc0-412c-b4fd-fcfd912180ec-kubelet-dir\") pod \"installer-9-crc\" (UID: \"b9ef2435-1dc0-412c-b4fd-fcfd912180ec\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.003211 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/b9ef2435-1dc0-412c-b4fd-fcfd912180ec-var-lock\") pod \"installer-9-crc\" (UID: 
\"b9ef2435-1dc0-412c-b4fd-fcfd912180ec\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.018098 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b9ef2435-1dc0-412c-b4fd-fcfd912180ec-kube-api-access\") pod \"installer-9-crc\" (UID: \"b9ef2435-1dc0-412c-b4fd-fcfd912180ec\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.148216 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.450261 4687 generic.go:334] "Generic (PLEG): container finished" podID="3afcc926-b324-4fe9-933c-4918a88619d9" containerID="90a6b85e47fd5b21e2cdb165baa3675493ca81a4c765a2e20955e5a46f7a4b91" exitCode=0 Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.450863 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nx4ps" event={"ID":"3afcc926-b324-4fe9-933c-4918a88619d9","Type":"ContainerDied","Data":"90a6b85e47fd5b21e2cdb165baa3675493ca81a4c765a2e20955e5a46f7a4b91"} Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.454441 4687 generic.go:334] "Generic (PLEG): container finished" podID="ccde1639-05be-47c5-93b8-c1eb83167814" containerID="43759463b90b228b018e6cb77363d996b893669384d2ddf8f4c267081812a120" exitCode=0 Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.454524 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gpptd" event={"ID":"ccde1639-05be-47c5-93b8-c1eb83167814","Type":"ContainerDied","Data":"43759463b90b228b018e6cb77363d996b893669384d2ddf8f4c267081812a120"} Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.457034 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59vsx" event={"ID":"d8cf4b36-d7aa-4d9a-bc65-a893435ca244","Type":"ContainerStarted","Data":"919d925f253c665e7c4b4ee72e57c4daa6d9ab7a87277bbfc937648664dbc4b9"} Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.461385 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bvzw4" event={"ID":"f14d8d9b-a1bd-49c3-b5ae-85712b344568","Type":"ContainerStarted","Data":"f3b72bf2c20c445be4c63bb723a2a2b1b25f3d26eef602dd172a6d94979b9498"} Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.463795 4687 generic.go:334] "Generic (PLEG): container finished" podID="87d111a8-6128-4b3c-a3de-2143ab121856" containerID="523ce6880f708adee6c45b11f9a9f3ce19f2ccf6f299f634c6c8aaaaf0a15715" exitCode=0 Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.463847 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-w8k2g" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.463882 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w8k2g" event={"ID":"87d111a8-6128-4b3c-a3de-2143ab121856","Type":"ContainerDied","Data":"523ce6880f708adee6c45b11f9a9f3ce19f2ccf6f299f634c6c8aaaaf0a15715"} Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.463921 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w8k2g" event={"ID":"87d111a8-6128-4b3c-a3de-2143ab121856","Type":"ContainerDied","Data":"a91af760a3b34c60d0e041244a410bd414868e2be57f3e1202f36296c8b8fd8e"} Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.463967 4687 scope.go:117] "RemoveContainer" containerID="523ce6880f708adee6c45b11f9a9f3ce19f2ccf6f299f634c6c8aaaaf0a15715" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.485150 4687 scope.go:117] "RemoveContainer" containerID="4e085e50ca3e56b0a3ae1b791c50f73e5f03857b4d916d416d4f2659b5ace4e3" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.494549 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bvzw4" podStartSLOduration=2.615799714 podStartE2EDuration="50.494499162s" podCreationTimestamp="2025-11-25 09:06:06 +0000 UTC" firstStartedPulling="2025-11-25 09:06:08.041744806 +0000 UTC m=+163.095384524" lastFinishedPulling="2025-11-25 09:06:55.920444254 +0000 UTC m=+210.974083972" observedRunningTime="2025-11-25 09:06:56.488925168 +0000 UTC m=+211.542564886" watchObservedRunningTime="2025-11-25 09:06:56.494499162 +0000 UTC m=+211.548138880" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.516791 4687 scope.go:117] "RemoveContainer" containerID="505e36a1ddd50f5f948b175dcfec7652a1f0a4a44a610044ab8d7e53ddebde5f" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.531185 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-59vsx" podStartSLOduration=2.60628635 podStartE2EDuration="53.531167663s" podCreationTimestamp="2025-11-25 09:06:03 +0000 UTC" firstStartedPulling="2025-11-25 09:06:04.97019695 +0000 UTC m=+160.023836668" lastFinishedPulling="2025-11-25 09:06:55.895078263 +0000 UTC m=+210.948717981" observedRunningTime="2025-11-25 09:06:56.530093633 +0000 UTC m=+211.583733361" watchObservedRunningTime="2025-11-25 09:06:56.531167663 +0000 UTC m=+211.584807381" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.535590 4687 scope.go:117] "RemoveContainer" containerID="523ce6880f708adee6c45b11f9a9f3ce19f2ccf6f299f634c6c8aaaaf0a15715" Nov 25 09:06:56 crc kubenswrapper[4687]: E1125 09:06:56.536108 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"523ce6880f708adee6c45b11f9a9f3ce19f2ccf6f299f634c6c8aaaaf0a15715\": container with ID starting with 523ce6880f708adee6c45b11f9a9f3ce19f2ccf6f299f634c6c8aaaaf0a15715 not found: ID does not exist" containerID="523ce6880f708adee6c45b11f9a9f3ce19f2ccf6f299f634c6c8aaaaf0a15715" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.536148 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"523ce6880f708adee6c45b11f9a9f3ce19f2ccf6f299f634c6c8aaaaf0a15715"} err="failed to get container status \"523ce6880f708adee6c45b11f9a9f3ce19f2ccf6f299f634c6c8aaaaf0a15715\": rpc error: code = NotFound desc = could not find 
container \"523ce6880f708adee6c45b11f9a9f3ce19f2ccf6f299f634c6c8aaaaf0a15715\": container with ID starting with 523ce6880f708adee6c45b11f9a9f3ce19f2ccf6f299f634c6c8aaaaf0a15715 not found: ID does not exist" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.536180 4687 scope.go:117] "RemoveContainer" containerID="4e085e50ca3e56b0a3ae1b791c50f73e5f03857b4d916d416d4f2659b5ace4e3" Nov 25 09:06:56 crc kubenswrapper[4687]: E1125 09:06:56.536773 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e085e50ca3e56b0a3ae1b791c50f73e5f03857b4d916d416d4f2659b5ace4e3\": container with ID starting with 4e085e50ca3e56b0a3ae1b791c50f73e5f03857b4d916d416d4f2659b5ace4e3 not found: ID does not exist" containerID="4e085e50ca3e56b0a3ae1b791c50f73e5f03857b4d916d416d4f2659b5ace4e3" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.536812 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e085e50ca3e56b0a3ae1b791c50f73e5f03857b4d916d416d4f2659b5ace4e3"} err="failed to get container status \"4e085e50ca3e56b0a3ae1b791c50f73e5f03857b4d916d416d4f2659b5ace4e3\": rpc error: code = NotFound desc = could not find container \"4e085e50ca3e56b0a3ae1b791c50f73e5f03857b4d916d416d4f2659b5ace4e3\": container with ID starting with 4e085e50ca3e56b0a3ae1b791c50f73e5f03857b4d916d416d4f2659b5ace4e3 not found: ID does not exist" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.536832 4687 scope.go:117] "RemoveContainer" containerID="505e36a1ddd50f5f948b175dcfec7652a1f0a4a44a610044ab8d7e53ddebde5f" Nov 25 09:06:56 crc kubenswrapper[4687]: E1125 09:06:56.537283 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"505e36a1ddd50f5f948b175dcfec7652a1f0a4a44a610044ab8d7e53ddebde5f\": container with ID starting with 505e36a1ddd50f5f948b175dcfec7652a1f0a4a44a610044ab8d7e53ddebde5f not found: ID does not exist" containerID="505e36a1ddd50f5f948b175dcfec7652a1f0a4a44a610044ab8d7e53ddebde5f" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.537316 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"505e36a1ddd50f5f948b175dcfec7652a1f0a4a44a610044ab8d7e53ddebde5f"} err="failed to get container status \"505e36a1ddd50f5f948b175dcfec7652a1f0a4a44a610044ab8d7e53ddebde5f\": rpc error: code = NotFound desc = could not find container \"505e36a1ddd50f5f948b175dcfec7652a1f0a4a44a610044ab8d7e53ddebde5f\": container with ID starting with 505e36a1ddd50f5f948b175dcfec7652a1f0a4a44a610044ab8d7e53ddebde5f not found: ID does not exist" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.545550 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.558861 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-w8k2g"] Nov 25 09:06:56 crc kubenswrapper[4687]: W1125 09:06:56.568399 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podb9ef2435_1dc0_412c_b4fd_fcfd912180ec.slice/crio-eb9a6be2cd1c70e6fe7257520c030cf28426ff4da57ce62c2636d6ffd2b79863 WatchSource:0}: Error finding container eb9a6be2cd1c70e6fe7257520c030cf28426ff4da57ce62c2636d6ffd2b79863: Status 404 returned error can't find the container with id eb9a6be2cd1c70e6fe7257520c030cf28426ff4da57ce62c2636d6ffd2b79863 Nov 25 09:06:56 crc 
kubenswrapper[4687]: I1125 09:06:56.572983 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-w8k2g"] Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.916888 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fqnbt" Nov 25 09:06:56 crc kubenswrapper[4687]: I1125 09:06:56.962173 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fqnbt" Nov 25 09:06:57 crc kubenswrapper[4687]: I1125 09:06:57.266316 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bvzw4" Nov 25 09:06:57 crc kubenswrapper[4687]: I1125 09:06:57.277030 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bvzw4" Nov 25 09:06:57 crc kubenswrapper[4687]: I1125 09:06:57.469774 4687 generic.go:334] "Generic (PLEG): container finished" podID="31c9ed99-0727-472a-8b48-285fdaaf558c" containerID="3f52a0a7dbd55251e891ce827a37bfce4603522247be1ae37e304874b212f36c" exitCode=0 Nov 25 09:06:57 crc kubenswrapper[4687]: I1125 09:06:57.469841 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qsgtx" event={"ID":"31c9ed99-0727-472a-8b48-285fdaaf558c","Type":"ContainerDied","Data":"3f52a0a7dbd55251e891ce827a37bfce4603522247be1ae37e304874b212f36c"} Nov 25 09:06:57 crc kubenswrapper[4687]: I1125 09:06:57.477933 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"b9ef2435-1dc0-412c-b4fd-fcfd912180ec","Type":"ContainerStarted","Data":"f293447b5a3d4167603454ad2d1dde4d1f3b6ecde00d59607e70b0818bb9e9eb"} Nov 25 09:06:57 crc kubenswrapper[4687]: I1125 09:06:57.477980 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"b9ef2435-1dc0-412c-b4fd-fcfd912180ec","Type":"ContainerStarted","Data":"eb9a6be2cd1c70e6fe7257520c030cf28426ff4da57ce62c2636d6ffd2b79863"} Nov 25 09:06:57 crc kubenswrapper[4687]: I1125 09:06:57.515198 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.5151779100000002 podStartE2EDuration="2.51517791s" podCreationTimestamp="2025-11-25 09:06:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:06:57.512742014 +0000 UTC m=+212.566381722" watchObservedRunningTime="2025-11-25 09:06:57.51517791 +0000 UTC m=+212.568817628" Nov 25 09:06:57 crc kubenswrapper[4687]: I1125 09:06:57.740748 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87d111a8-6128-4b3c-a3de-2143ab121856" path="/var/lib/kubelet/pods/87d111a8-6128-4b3c-a3de-2143ab121856/volumes" Nov 25 09:06:58 crc kubenswrapper[4687]: I1125 09:06:58.321939 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bvzw4" podUID="f14d8d9b-a1bd-49c3-b5ae-85712b344568" containerName="registry-server" probeResult="failure" output=< Nov 25 09:06:58 crc kubenswrapper[4687]: timeout: failed to connect service ":50051" within 1s Nov 25 09:06:58 crc kubenswrapper[4687]: > Nov 25 09:06:58 crc kubenswrapper[4687]: I1125 09:06:58.484624 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gpptd" 
event={"ID":"ccde1639-05be-47c5-93b8-c1eb83167814","Type":"ContainerStarted","Data":"3917a7ee114715ca9218a4df6f577faaacf4f88b333309eb5cfa742470c2310d"} Nov 25 09:06:58 crc kubenswrapper[4687]: I1125 09:06:58.488064 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qsgtx" event={"ID":"31c9ed99-0727-472a-8b48-285fdaaf558c","Type":"ContainerStarted","Data":"e8982d773651f2e62e7d96d6fdbc3f6385e9b3005f5a931561d67f35280ce968"} Nov 25 09:06:58 crc kubenswrapper[4687]: I1125 09:06:58.490841 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nx4ps" event={"ID":"3afcc926-b324-4fe9-933c-4918a88619d9","Type":"ContainerStarted","Data":"90bdd72d9c318d4c97f055cf8bedf1a86acb4aa9e62b720a7c6702a8944b730d"} Nov 25 09:06:58 crc kubenswrapper[4687]: I1125 09:06:58.505533 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-gpptd" podStartSLOduration=2.846450397 podStartE2EDuration="55.505504083s" podCreationTimestamp="2025-11-25 09:06:03 +0000 UTC" firstStartedPulling="2025-11-25 09:06:04.994861513 +0000 UTC m=+160.048501231" lastFinishedPulling="2025-11-25 09:06:57.653915199 +0000 UTC m=+212.707554917" observedRunningTime="2025-11-25 09:06:58.503503017 +0000 UTC m=+213.557142745" watchObservedRunningTime="2025-11-25 09:06:58.505504083 +0000 UTC m=+213.559143801" Nov 25 09:06:58 crc kubenswrapper[4687]: I1125 09:06:58.526014 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-nx4ps" podStartSLOduration=2.976112236 podStartE2EDuration="53.525993138s" podCreationTimestamp="2025-11-25 09:06:05 +0000 UTC" firstStartedPulling="2025-11-25 09:06:07.021670804 +0000 UTC m=+162.075310522" lastFinishedPulling="2025-11-25 09:06:57.571551706 +0000 UTC m=+212.625191424" observedRunningTime="2025-11-25 09:06:58.525393522 +0000 UTC m=+213.579033250" watchObservedRunningTime="2025-11-25 09:06:58.525993138 +0000 UTC m=+213.579632856" Nov 25 09:07:03 crc kubenswrapper[4687]: I1125 09:07:03.679575 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-gpptd" Nov 25 09:07:03 crc kubenswrapper[4687]: I1125 09:07:03.680179 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-gpptd" Nov 25 09:07:03 crc kubenswrapper[4687]: I1125 09:07:03.721763 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-gpptd" Nov 25 09:07:03 crc kubenswrapper[4687]: I1125 09:07:03.745232 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qsgtx" podStartSLOduration=7.853340048 podStartE2EDuration="58.745215711s" podCreationTimestamp="2025-11-25 09:06:05 +0000 UTC" firstStartedPulling="2025-11-25 09:06:07.019256491 +0000 UTC m=+162.072896209" lastFinishedPulling="2025-11-25 09:06:57.911132154 +0000 UTC m=+212.964771872" observedRunningTime="2025-11-25 09:06:58.545795475 +0000 UTC m=+213.599435193" watchObservedRunningTime="2025-11-25 09:07:03.745215711 +0000 UTC m=+218.798855429" Nov 25 09:07:03 crc kubenswrapper[4687]: I1125 09:07:03.853047 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-59vsx" Nov 25 09:07:03 crc kubenswrapper[4687]: I1125 09:07:03.853099 4687 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-59vsx" Nov 25 09:07:03 crc kubenswrapper[4687]: I1125 09:07:03.894052 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-59vsx" Nov 25 09:07:04 crc kubenswrapper[4687]: I1125 09:07:04.324668 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-49wvg" Nov 25 09:07:04 crc kubenswrapper[4687]: I1125 09:07:04.364468 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-49wvg" Nov 25 09:07:04 crc kubenswrapper[4687]: I1125 09:07:04.568006 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-gpptd" Nov 25 09:07:04 crc kubenswrapper[4687]: I1125 09:07:04.579532 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-59vsx" Nov 25 09:07:05 crc kubenswrapper[4687]: I1125 09:07:05.567000 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-49wvg"] Nov 25 09:07:05 crc kubenswrapper[4687]: I1125 09:07:05.567257 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-49wvg" podUID="614abb7b-e915-4b2a-9f57-bbd8ec4d2433" containerName="registry-server" containerID="cri-o://078bc5e927f35f1a98f60a7fe9a0fb2184335b44babf71a0618ea5251a88f766" gracePeriod=2 Nov 25 09:07:05 crc kubenswrapper[4687]: I1125 09:07:05.862462 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-nx4ps" Nov 25 09:07:05 crc kubenswrapper[4687]: I1125 09:07:05.863503 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-nx4ps" Nov 25 09:07:05 crc kubenswrapper[4687]: I1125 09:07:05.902772 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-nx4ps" Nov 25 09:07:06 crc kubenswrapper[4687]: I1125 09:07:06.261763 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qsgtx" Nov 25 09:07:06 crc kubenswrapper[4687]: I1125 09:07:06.261814 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qsgtx" Nov 25 09:07:06 crc kubenswrapper[4687]: I1125 09:07:06.299898 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qsgtx" Nov 25 09:07:06 crc kubenswrapper[4687]: I1125 09:07:06.573786 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-nx4ps" Nov 25 09:07:06 crc kubenswrapper[4687]: I1125 09:07:06.582371 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qsgtx" Nov 25 09:07:07 crc kubenswrapper[4687]: I1125 09:07:07.335343 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bvzw4" Nov 25 09:07:07 crc kubenswrapper[4687]: I1125 09:07:07.383462 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bvzw4" Nov 25 09:07:07 crc kubenswrapper[4687]: I1125 09:07:07.545305 4687 
generic.go:334] "Generic (PLEG): container finished" podID="614abb7b-e915-4b2a-9f57-bbd8ec4d2433" containerID="078bc5e927f35f1a98f60a7fe9a0fb2184335b44babf71a0618ea5251a88f766" exitCode=0 Nov 25 09:07:07 crc kubenswrapper[4687]: I1125 09:07:07.545658 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-49wvg" event={"ID":"614abb7b-e915-4b2a-9f57-bbd8ec4d2433","Type":"ContainerDied","Data":"078bc5e927f35f1a98f60a7fe9a0fb2184335b44babf71a0618ea5251a88f766"} Nov 25 09:07:07 crc kubenswrapper[4687]: I1125 09:07:07.958673 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-49wvg" Nov 25 09:07:08 crc kubenswrapper[4687]: I1125 09:07:08.053662 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/614abb7b-e915-4b2a-9f57-bbd8ec4d2433-catalog-content\") pod \"614abb7b-e915-4b2a-9f57-bbd8ec4d2433\" (UID: \"614abb7b-e915-4b2a-9f57-bbd8ec4d2433\") " Nov 25 09:07:08 crc kubenswrapper[4687]: I1125 09:07:08.053713 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s8bnj\" (UniqueName: \"kubernetes.io/projected/614abb7b-e915-4b2a-9f57-bbd8ec4d2433-kube-api-access-s8bnj\") pod \"614abb7b-e915-4b2a-9f57-bbd8ec4d2433\" (UID: \"614abb7b-e915-4b2a-9f57-bbd8ec4d2433\") " Nov 25 09:07:08 crc kubenswrapper[4687]: I1125 09:07:08.053823 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/614abb7b-e915-4b2a-9f57-bbd8ec4d2433-utilities\") pod \"614abb7b-e915-4b2a-9f57-bbd8ec4d2433\" (UID: \"614abb7b-e915-4b2a-9f57-bbd8ec4d2433\") " Nov 25 09:07:08 crc kubenswrapper[4687]: I1125 09:07:08.054954 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/614abb7b-e915-4b2a-9f57-bbd8ec4d2433-utilities" (OuterVolumeSpecName: "utilities") pod "614abb7b-e915-4b2a-9f57-bbd8ec4d2433" (UID: "614abb7b-e915-4b2a-9f57-bbd8ec4d2433"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:07:08 crc kubenswrapper[4687]: I1125 09:07:08.062722 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/614abb7b-e915-4b2a-9f57-bbd8ec4d2433-kube-api-access-s8bnj" (OuterVolumeSpecName: "kube-api-access-s8bnj") pod "614abb7b-e915-4b2a-9f57-bbd8ec4d2433" (UID: "614abb7b-e915-4b2a-9f57-bbd8ec4d2433"). InnerVolumeSpecName "kube-api-access-s8bnj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:08 crc kubenswrapper[4687]: I1125 09:07:08.107035 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/614abb7b-e915-4b2a-9f57-bbd8ec4d2433-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "614abb7b-e915-4b2a-9f57-bbd8ec4d2433" (UID: "614abb7b-e915-4b2a-9f57-bbd8ec4d2433"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:07:08 crc kubenswrapper[4687]: I1125 09:07:08.155320 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/614abb7b-e915-4b2a-9f57-bbd8ec4d2433-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:08 crc kubenswrapper[4687]: I1125 09:07:08.155631 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s8bnj\" (UniqueName: \"kubernetes.io/projected/614abb7b-e915-4b2a-9f57-bbd8ec4d2433-kube-api-access-s8bnj\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:08 crc kubenswrapper[4687]: I1125 09:07:08.155725 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/614abb7b-e915-4b2a-9f57-bbd8ec4d2433-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:08 crc kubenswrapper[4687]: I1125 09:07:08.554541 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-49wvg" event={"ID":"614abb7b-e915-4b2a-9f57-bbd8ec4d2433","Type":"ContainerDied","Data":"e144e3ea88ec45b90ae92b8df951526923ed0d55d4f68acdae674f7666e5d58b"} Nov 25 09:07:08 crc kubenswrapper[4687]: I1125 09:07:08.554581 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-49wvg" Nov 25 09:07:08 crc kubenswrapper[4687]: I1125 09:07:08.554617 4687 scope.go:117] "RemoveContainer" containerID="078bc5e927f35f1a98f60a7fe9a0fb2184335b44babf71a0618ea5251a88f766" Nov 25 09:07:08 crc kubenswrapper[4687]: I1125 09:07:08.575792 4687 scope.go:117] "RemoveContainer" containerID="40cff460501fcbf9c6fe6f9a45db64400e08bde1507764ed45c26ff05e579e28" Nov 25 09:07:08 crc kubenswrapper[4687]: I1125 09:07:08.586321 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-49wvg"] Nov 25 09:07:08 crc kubenswrapper[4687]: I1125 09:07:08.589261 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-49wvg"] Nov 25 09:07:08 crc kubenswrapper[4687]: I1125 09:07:08.599773 4687 scope.go:117] "RemoveContainer" containerID="a1d5e5d6a972bc114ab031efa9249a41212ac90bb25d9e2f9354257e24fa8c09" Nov 25 09:07:08 crc kubenswrapper[4687]: I1125 09:07:08.967916 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qsgtx"] Nov 25 09:07:08 crc kubenswrapper[4687]: I1125 09:07:08.968146 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qsgtx" podUID="31c9ed99-0727-472a-8b48-285fdaaf558c" containerName="registry-server" containerID="cri-o://e8982d773651f2e62e7d96d6fdbc3f6385e9b3005f5a931561d67f35280ce968" gracePeriod=2 Nov 25 09:07:09 crc kubenswrapper[4687]: I1125 09:07:09.747403 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="614abb7b-e915-4b2a-9f57-bbd8ec4d2433" path="/var/lib/kubelet/pods/614abb7b-e915-4b2a-9f57-bbd8ec4d2433/volumes" Nov 25 09:07:10 crc kubenswrapper[4687]: I1125 09:07:10.362517 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bvzw4"] Nov 25 09:07:10 crc kubenswrapper[4687]: I1125 09:07:10.362751 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bvzw4" podUID="f14d8d9b-a1bd-49c3-b5ae-85712b344568" containerName="registry-server" 
containerID="cri-o://f3b72bf2c20c445be4c63bb723a2a2b1b25f3d26eef602dd172a6d94979b9498" gracePeriod=2 Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.291386 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bvzw4" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.397411 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjv4j\" (UniqueName: \"kubernetes.io/projected/f14d8d9b-a1bd-49c3-b5ae-85712b344568-kube-api-access-pjv4j\") pod \"f14d8d9b-a1bd-49c3-b5ae-85712b344568\" (UID: \"f14d8d9b-a1bd-49c3-b5ae-85712b344568\") " Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.398235 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f14d8d9b-a1bd-49c3-b5ae-85712b344568-catalog-content\") pod \"f14d8d9b-a1bd-49c3-b5ae-85712b344568\" (UID: \"f14d8d9b-a1bd-49c3-b5ae-85712b344568\") " Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.398276 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f14d8d9b-a1bd-49c3-b5ae-85712b344568-utilities\") pod \"f14d8d9b-a1bd-49c3-b5ae-85712b344568\" (UID: \"f14d8d9b-a1bd-49c3-b5ae-85712b344568\") " Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.399754 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f14d8d9b-a1bd-49c3-b5ae-85712b344568-utilities" (OuterVolumeSpecName: "utilities") pod "f14d8d9b-a1bd-49c3-b5ae-85712b344568" (UID: "f14d8d9b-a1bd-49c3-b5ae-85712b344568"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.399971 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f14d8d9b-a1bd-49c3-b5ae-85712b344568-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.409766 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f14d8d9b-a1bd-49c3-b5ae-85712b344568-kube-api-access-pjv4j" (OuterVolumeSpecName: "kube-api-access-pjv4j") pod "f14d8d9b-a1bd-49c3-b5ae-85712b344568" (UID: "f14d8d9b-a1bd-49c3-b5ae-85712b344568"). InnerVolumeSpecName "kube-api-access-pjv4j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.502107 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjv4j\" (UniqueName: \"kubernetes.io/projected/f14d8d9b-a1bd-49c3-b5ae-85712b344568-kube-api-access-pjv4j\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.554497 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f14d8d9b-a1bd-49c3-b5ae-85712b344568-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f14d8d9b-a1bd-49c3-b5ae-85712b344568" (UID: "f14d8d9b-a1bd-49c3-b5ae-85712b344568"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.578102 4687 generic.go:334] "Generic (PLEG): container finished" podID="31c9ed99-0727-472a-8b48-285fdaaf558c" containerID="e8982d773651f2e62e7d96d6fdbc3f6385e9b3005f5a931561d67f35280ce968" exitCode=0 Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.578149 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qsgtx" event={"ID":"31c9ed99-0727-472a-8b48-285fdaaf558c","Type":"ContainerDied","Data":"e8982d773651f2e62e7d96d6fdbc3f6385e9b3005f5a931561d67f35280ce968"} Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.578211 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qsgtx" event={"ID":"31c9ed99-0727-472a-8b48-285fdaaf558c","Type":"ContainerDied","Data":"bf85a45e962602a2aa27e278274c16c716c846a680c1d156f2cf9d2b6201e0aa"} Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.578226 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf85a45e962602a2aa27e278274c16c716c846a680c1d156f2cf9d2b6201e0aa" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.579306 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qsgtx" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.580726 4687 generic.go:334] "Generic (PLEG): container finished" podID="f14d8d9b-a1bd-49c3-b5ae-85712b344568" containerID="f3b72bf2c20c445be4c63bb723a2a2b1b25f3d26eef602dd172a6d94979b9498" exitCode=0 Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.580958 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bvzw4" event={"ID":"f14d8d9b-a1bd-49c3-b5ae-85712b344568","Type":"ContainerDied","Data":"f3b72bf2c20c445be4c63bb723a2a2b1b25f3d26eef602dd172a6d94979b9498"} Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.580982 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bvzw4" event={"ID":"f14d8d9b-a1bd-49c3-b5ae-85712b344568","Type":"ContainerDied","Data":"ed0329373030c69054eb8210893b4449ee848c0ee7e7389933ed5b0462d4d851"} Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.580998 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bvzw4" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.581002 4687 scope.go:117] "RemoveContainer" containerID="f3b72bf2c20c445be4c63bb723a2a2b1b25f3d26eef602dd172a6d94979b9498" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.603563 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f14d8d9b-a1bd-49c3-b5ae-85712b344568-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.613383 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bvzw4"] Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.616374 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bvzw4"] Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.618273 4687 scope.go:117] "RemoveContainer" containerID="aa975da8662c4aa4913ce96a447f2e0f59d8f75a88fcfd3afa0dd4da345ba7ba" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.637021 4687 scope.go:117] "RemoveContainer" containerID="df37cbd82a2ed7ac6324c49f79986edf5fed7da2ea2788d7db24e60958286ab8" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.652884 4687 scope.go:117] "RemoveContainer" containerID="f3b72bf2c20c445be4c63bb723a2a2b1b25f3d26eef602dd172a6d94979b9498" Nov 25 09:07:11 crc kubenswrapper[4687]: E1125 09:07:11.654314 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3b72bf2c20c445be4c63bb723a2a2b1b25f3d26eef602dd172a6d94979b9498\": container with ID starting with f3b72bf2c20c445be4c63bb723a2a2b1b25f3d26eef602dd172a6d94979b9498 not found: ID does not exist" containerID="f3b72bf2c20c445be4c63bb723a2a2b1b25f3d26eef602dd172a6d94979b9498" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.654413 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3b72bf2c20c445be4c63bb723a2a2b1b25f3d26eef602dd172a6d94979b9498"} err="failed to get container status \"f3b72bf2c20c445be4c63bb723a2a2b1b25f3d26eef602dd172a6d94979b9498\": rpc error: code = NotFound desc = could not find container \"f3b72bf2c20c445be4c63bb723a2a2b1b25f3d26eef602dd172a6d94979b9498\": container with ID starting with f3b72bf2c20c445be4c63bb723a2a2b1b25f3d26eef602dd172a6d94979b9498 not found: ID does not exist" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.654480 4687 scope.go:117] "RemoveContainer" containerID="aa975da8662c4aa4913ce96a447f2e0f59d8f75a88fcfd3afa0dd4da345ba7ba" Nov 25 09:07:11 crc kubenswrapper[4687]: E1125 09:07:11.655515 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa975da8662c4aa4913ce96a447f2e0f59d8f75a88fcfd3afa0dd4da345ba7ba\": container with ID starting with aa975da8662c4aa4913ce96a447f2e0f59d8f75a88fcfd3afa0dd4da345ba7ba not found: ID does not exist" containerID="aa975da8662c4aa4913ce96a447f2e0f59d8f75a88fcfd3afa0dd4da345ba7ba" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.655547 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa975da8662c4aa4913ce96a447f2e0f59d8f75a88fcfd3afa0dd4da345ba7ba"} err="failed to get container status \"aa975da8662c4aa4913ce96a447f2e0f59d8f75a88fcfd3afa0dd4da345ba7ba\": rpc error: code = NotFound desc = could not find container 
\"aa975da8662c4aa4913ce96a447f2e0f59d8f75a88fcfd3afa0dd4da345ba7ba\": container with ID starting with aa975da8662c4aa4913ce96a447f2e0f59d8f75a88fcfd3afa0dd4da345ba7ba not found: ID does not exist" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.655572 4687 scope.go:117] "RemoveContainer" containerID="df37cbd82a2ed7ac6324c49f79986edf5fed7da2ea2788d7db24e60958286ab8" Nov 25 09:07:11 crc kubenswrapper[4687]: E1125 09:07:11.655816 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df37cbd82a2ed7ac6324c49f79986edf5fed7da2ea2788d7db24e60958286ab8\": container with ID starting with df37cbd82a2ed7ac6324c49f79986edf5fed7da2ea2788d7db24e60958286ab8 not found: ID does not exist" containerID="df37cbd82a2ed7ac6324c49f79986edf5fed7da2ea2788d7db24e60958286ab8" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.655851 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df37cbd82a2ed7ac6324c49f79986edf5fed7da2ea2788d7db24e60958286ab8"} err="failed to get container status \"df37cbd82a2ed7ac6324c49f79986edf5fed7da2ea2788d7db24e60958286ab8\": rpc error: code = NotFound desc = could not find container \"df37cbd82a2ed7ac6324c49f79986edf5fed7da2ea2788d7db24e60958286ab8\": container with ID starting with df37cbd82a2ed7ac6324c49f79986edf5fed7da2ea2788d7db24e60958286ab8 not found: ID does not exist" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.704653 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31c9ed99-0727-472a-8b48-285fdaaf558c-utilities\") pod \"31c9ed99-0727-472a-8b48-285fdaaf558c\" (UID: \"31c9ed99-0727-472a-8b48-285fdaaf558c\") " Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.704954 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31c9ed99-0727-472a-8b48-285fdaaf558c-catalog-content\") pod \"31c9ed99-0727-472a-8b48-285fdaaf558c\" (UID: \"31c9ed99-0727-472a-8b48-285fdaaf558c\") " Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.705221 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8bx6j\" (UniqueName: \"kubernetes.io/projected/31c9ed99-0727-472a-8b48-285fdaaf558c-kube-api-access-8bx6j\") pod \"31c9ed99-0727-472a-8b48-285fdaaf558c\" (UID: \"31c9ed99-0727-472a-8b48-285fdaaf558c\") " Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.705595 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31c9ed99-0727-472a-8b48-285fdaaf558c-utilities" (OuterVolumeSpecName: "utilities") pod "31c9ed99-0727-472a-8b48-285fdaaf558c" (UID: "31c9ed99-0727-472a-8b48-285fdaaf558c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.706013 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31c9ed99-0727-472a-8b48-285fdaaf558c-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.709344 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31c9ed99-0727-472a-8b48-285fdaaf558c-kube-api-access-8bx6j" (OuterVolumeSpecName: "kube-api-access-8bx6j") pod "31c9ed99-0727-472a-8b48-285fdaaf558c" (UID: "31c9ed99-0727-472a-8b48-285fdaaf558c"). 
InnerVolumeSpecName "kube-api-access-8bx6j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.726912 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31c9ed99-0727-472a-8b48-285fdaaf558c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "31c9ed99-0727-472a-8b48-285fdaaf558c" (UID: "31c9ed99-0727-472a-8b48-285fdaaf558c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.742242 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f14d8d9b-a1bd-49c3-b5ae-85712b344568" path="/var/lib/kubelet/pods/f14d8d9b-a1bd-49c3-b5ae-85712b344568/volumes" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.809561 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bx6j\" (UniqueName: \"kubernetes.io/projected/31c9ed99-0727-472a-8b48-285fdaaf558c-kube-api-access-8bx6j\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:11 crc kubenswrapper[4687]: I1125 09:07:11.809599 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31c9ed99-0727-472a-8b48-285fdaaf558c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:12 crc kubenswrapper[4687]: I1125 09:07:12.586036 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qsgtx" Nov 25 09:07:12 crc kubenswrapper[4687]: I1125 09:07:12.606457 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qsgtx"] Nov 25 09:07:12 crc kubenswrapper[4687]: I1125 09:07:12.610159 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qsgtx"] Nov 25 09:07:13 crc kubenswrapper[4687]: I1125 09:07:13.741930 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31c9ed99-0727-472a-8b48-285fdaaf558c" path="/var/lib/kubelet/pods/31c9ed99-0727-472a-8b48-285fdaaf558c/volumes" Nov 25 09:07:15 crc kubenswrapper[4687]: I1125 09:07:15.996255 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-c7v75"] Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.514613 4687 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 09:07:34 crc kubenswrapper[4687]: E1125 09:07:34.515592 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31c9ed99-0727-472a-8b48-285fdaaf558c" containerName="registry-server" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.515614 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="31c9ed99-0727-472a-8b48-285fdaaf558c" containerName="registry-server" Nov 25 09:07:34 crc kubenswrapper[4687]: E1125 09:07:34.515657 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31c9ed99-0727-472a-8b48-285fdaaf558c" containerName="extract-utilities" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.515670 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="31c9ed99-0727-472a-8b48-285fdaaf558c" containerName="extract-utilities" Nov 25 09:07:34 crc kubenswrapper[4687]: E1125 09:07:34.515696 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="614abb7b-e915-4b2a-9f57-bbd8ec4d2433" containerName="registry-server" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 
09:07:34.515708 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="614abb7b-e915-4b2a-9f57-bbd8ec4d2433" containerName="registry-server" Nov 25 09:07:34 crc kubenswrapper[4687]: E1125 09:07:34.515726 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87d111a8-6128-4b3c-a3de-2143ab121856" containerName="extract-content" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.515737 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="87d111a8-6128-4b3c-a3de-2143ab121856" containerName="extract-content" Nov 25 09:07:34 crc kubenswrapper[4687]: E1125 09:07:34.515756 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87d111a8-6128-4b3c-a3de-2143ab121856" containerName="registry-server" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.515768 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="87d111a8-6128-4b3c-a3de-2143ab121856" containerName="registry-server" Nov 25 09:07:34 crc kubenswrapper[4687]: E1125 09:07:34.515791 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f14d8d9b-a1bd-49c3-b5ae-85712b344568" containerName="extract-content" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.515803 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="f14d8d9b-a1bd-49c3-b5ae-85712b344568" containerName="extract-content" Nov 25 09:07:34 crc kubenswrapper[4687]: E1125 09:07:34.515820 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="614abb7b-e915-4b2a-9f57-bbd8ec4d2433" containerName="extract-utilities" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.515831 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="614abb7b-e915-4b2a-9f57-bbd8ec4d2433" containerName="extract-utilities" Nov 25 09:07:34 crc kubenswrapper[4687]: E1125 09:07:34.515847 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f14d8d9b-a1bd-49c3-b5ae-85712b344568" containerName="registry-server" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.515859 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="f14d8d9b-a1bd-49c3-b5ae-85712b344568" containerName="registry-server" Nov 25 09:07:34 crc kubenswrapper[4687]: E1125 09:07:34.515873 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f14d8d9b-a1bd-49c3-b5ae-85712b344568" containerName="extract-utilities" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.515884 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="f14d8d9b-a1bd-49c3-b5ae-85712b344568" containerName="extract-utilities" Nov 25 09:07:34 crc kubenswrapper[4687]: E1125 09:07:34.515903 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87d111a8-6128-4b3c-a3de-2143ab121856" containerName="extract-utilities" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.515915 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="87d111a8-6128-4b3c-a3de-2143ab121856" containerName="extract-utilities" Nov 25 09:07:34 crc kubenswrapper[4687]: E1125 09:07:34.515930 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31c9ed99-0727-472a-8b48-285fdaaf558c" containerName="extract-content" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.515942 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="31c9ed99-0727-472a-8b48-285fdaaf558c" containerName="extract-content" Nov 25 09:07:34 crc kubenswrapper[4687]: E1125 09:07:34.515961 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="614abb7b-e915-4b2a-9f57-bbd8ec4d2433" containerName="extract-content" Nov 25 09:07:34 
crc kubenswrapper[4687]: I1125 09:07:34.515973 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="614abb7b-e915-4b2a-9f57-bbd8ec4d2433" containerName="extract-content" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.516176 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="87d111a8-6128-4b3c-a3de-2143ab121856" containerName="registry-server" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.516215 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="31c9ed99-0727-472a-8b48-285fdaaf558c" containerName="registry-server" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.516238 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="f14d8d9b-a1bd-49c3-b5ae-85712b344568" containerName="registry-server" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.516263 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="614abb7b-e915-4b2a-9f57-bbd8ec4d2433" containerName="registry-server" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.518822 4687 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.519157 4687 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.519520 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f" gracePeriod=15 Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.519803 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8" gracePeriod=15 Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.519862 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0" gracePeriod=15 Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.520054 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48" gracePeriod=15 Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.520170 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.520069 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce" gracePeriod=15 Nov 25 09:07:34 crc kubenswrapper[4687]: E1125 09:07:34.520461 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.520503 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 25 09:07:34 crc kubenswrapper[4687]: E1125 09:07:34.520552 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.520560 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 09:07:34 crc kubenswrapper[4687]: E1125 09:07:34.520574 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.520580 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 09:07:34 crc kubenswrapper[4687]: E1125 09:07:34.520591 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.520597 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 09:07:34 crc kubenswrapper[4687]: E1125 09:07:34.520613 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.520619 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 09:07:34 crc kubenswrapper[4687]: E1125 09:07:34.520630 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.520637 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 09:07:34 crc kubenswrapper[4687]: E1125 09:07:34.520652 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.520659 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 09:07:34 crc kubenswrapper[4687]: E1125 09:07:34.520680 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.520691 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.521079 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.521094 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.521108 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.521120 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.521132 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.521147 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.521160 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.530633 4687 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.571141 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.612273 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.612362 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.612467 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.612502 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.612558 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.612611 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.612653 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.612700 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.714186 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.714261 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.714294 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.714337 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.714372 4687 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.714417 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.714438 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.714460 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.714559 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.714610 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.714641 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.714668 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.714673 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.714697 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.714633 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.714726 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.722726 4687 generic.go:334] "Generic (PLEG): container finished" podID="b9ef2435-1dc0-412c-b4fd-fcfd912180ec" containerID="f293447b5a3d4167603454ad2d1dde4d1f3b6ecde00d59607e70b0818bb9e9eb" exitCode=0 Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.722786 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"b9ef2435-1dc0-412c-b4fd-fcfd912180ec","Type":"ContainerDied","Data":"f293447b5a3d4167603454ad2d1dde4d1f3b6ecde00d59607e70b0818bb9e9eb"} Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.723615 4687 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.723801 4687 status_manager.go:851] "Failed to get status for pod" podUID="b9ef2435-1dc0-412c-b4fd-fcfd912180ec" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.724905 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.726191 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.727063 4687 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8" exitCode=0 Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.727086 4687 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce" exitCode=0 Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.727096 4687 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" 
containerID="34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0" exitCode=0 Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.727105 4687 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48" exitCode=2 Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.727136 4687 scope.go:117] "RemoveContainer" containerID="9df50820899b5838bc02816ab5aad9f0b90b17fe963afa55b97a1ac9257c6be1" Nov 25 09:07:34 crc kubenswrapper[4687]: I1125 09:07:34.864983 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:07:34 crc kubenswrapper[4687]: W1125 09:07:34.885072 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-189e38e20c47d835156738ebc11a0f5a39669069ace34045fd06341cf4c25e41 WatchSource:0}: Error finding container 189e38e20c47d835156738ebc11a0f5a39669069ace34045fd06341cf4c25e41: Status 404 returned error can't find the container with id 189e38e20c47d835156738ebc11a0f5a39669069ace34045fd06341cf4c25e41 Nov 25 09:07:34 crc kubenswrapper[4687]: E1125 09:07:34.889887 4687 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.246:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b34ba8d8c9116 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 09:07:34.889042198 +0000 UTC m=+249.942681956,LastTimestamp:2025-11-25 09:07:34.889042198 +0000 UTC m=+249.942681956,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 09:07:35 crc kubenswrapper[4687]: I1125 09:07:35.738199 4687 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:35 crc kubenswrapper[4687]: I1125 09:07:35.738289 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 09:07:35 crc kubenswrapper[4687]: I1125 09:07:35.738556 4687 status_manager.go:851] "Failed to get status for pod" podUID="b9ef2435-1dc0-412c-b4fd-fcfd912180ec" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:35 crc kubenswrapper[4687]: 
I1125 09:07:35.738800 4687 status_manager.go:851] "Failed to get status for pod" podUID="b9ef2435-1dc0-412c-b4fd-fcfd912180ec" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:35 crc kubenswrapper[4687]: I1125 09:07:35.739049 4687 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:35 crc kubenswrapper[4687]: I1125 09:07:35.742952 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"88b43efbefb8d9e9d28f0f4945b7278bce0e3801ce363458013cab03fc1865aa"} Nov 25 09:07:35 crc kubenswrapper[4687]: I1125 09:07:35.743009 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"189e38e20c47d835156738ebc11a0f5a39669069ace34045fd06341cf4c25e41"} Nov 25 09:07:35 crc kubenswrapper[4687]: E1125 09:07:35.771380 4687 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:35 crc kubenswrapper[4687]: E1125 09:07:35.772841 4687 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:35 crc kubenswrapper[4687]: E1125 09:07:35.773364 4687 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:35 crc kubenswrapper[4687]: E1125 09:07:35.773738 4687 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:35 crc kubenswrapper[4687]: E1125 09:07:35.773984 4687 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:35 crc kubenswrapper[4687]: I1125 09:07:35.774024 4687 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 25 09:07:35 crc kubenswrapper[4687]: E1125 09:07:35.774433 4687 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.246:6443: connect: connection refused" interval="200ms" Nov 25 09:07:35 crc kubenswrapper[4687]: E1125 09:07:35.975015 4687 controller.go:145] "Failed to ensure lease exists, will 
retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.246:6443: connect: connection refused" interval="400ms" Nov 25 09:07:35 crc kubenswrapper[4687]: I1125 09:07:35.977989 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:07:35 crc kubenswrapper[4687]: I1125 09:07:35.978542 4687 status_manager.go:851] "Failed to get status for pod" podUID="b9ef2435-1dc0-412c-b4fd-fcfd912180ec" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:35 crc kubenswrapper[4687]: I1125 09:07:35.978948 4687 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.031181 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b9ef2435-1dc0-412c-b4fd-fcfd912180ec-kube-api-access\") pod \"b9ef2435-1dc0-412c-b4fd-fcfd912180ec\" (UID: \"b9ef2435-1dc0-412c-b4fd-fcfd912180ec\") " Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.031245 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/b9ef2435-1dc0-412c-b4fd-fcfd912180ec-var-lock\") pod \"b9ef2435-1dc0-412c-b4fd-fcfd912180ec\" (UID: \"b9ef2435-1dc0-412c-b4fd-fcfd912180ec\") " Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.031269 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b9ef2435-1dc0-412c-b4fd-fcfd912180ec-kubelet-dir\") pod \"b9ef2435-1dc0-412c-b4fd-fcfd912180ec\" (UID: \"b9ef2435-1dc0-412c-b4fd-fcfd912180ec\") " Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.031543 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b9ef2435-1dc0-412c-b4fd-fcfd912180ec-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "b9ef2435-1dc0-412c-b4fd-fcfd912180ec" (UID: "b9ef2435-1dc0-412c-b4fd-fcfd912180ec"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.031576 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b9ef2435-1dc0-412c-b4fd-fcfd912180ec-var-lock" (OuterVolumeSpecName: "var-lock") pod "b9ef2435-1dc0-412c-b4fd-fcfd912180ec" (UID: "b9ef2435-1dc0-412c-b4fd-fcfd912180ec"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.036926 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9ef2435-1dc0-412c-b4fd-fcfd912180ec-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "b9ef2435-1dc0-412c-b4fd-fcfd912180ec" (UID: "b9ef2435-1dc0-412c-b4fd-fcfd912180ec"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.132842 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b9ef2435-1dc0-412c-b4fd-fcfd912180ec-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.132895 4687 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/b9ef2435-1dc0-412c-b4fd-fcfd912180ec-var-lock\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.132913 4687 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b9ef2435-1dc0-412c-b4fd-fcfd912180ec-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:36 crc kubenswrapper[4687]: E1125 09:07:36.377061 4687 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.246:6443: connect: connection refused" interval="800ms" Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.748135 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.748768 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"b9ef2435-1dc0-412c-b4fd-fcfd912180ec","Type":"ContainerDied","Data":"eb9a6be2cd1c70e6fe7257520c030cf28426ff4da57ce62c2636d6ffd2b79863"} Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.748806 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eb9a6be2cd1c70e6fe7257520c030cf28426ff4da57ce62c2636d6ffd2b79863" Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.764351 4687 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.764551 4687 status_manager.go:851] "Failed to get status for pod" podUID="b9ef2435-1dc0-412c-b4fd-fcfd912180ec" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.880244 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.881167 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.881955 4687 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.882387 4687 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.882788 4687 status_manager.go:851] "Failed to get status for pod" podUID="b9ef2435-1dc0-412c-b4fd-fcfd912180ec" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.944797 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.944929 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.944943 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.944998 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.945065 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.945174 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.945568 4687 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.945618 4687 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:36 crc kubenswrapper[4687]: I1125 09:07:36.945633 4687 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:37 crc kubenswrapper[4687]: E1125 09:07:37.105763 4687 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.246:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b34ba8d8c9116 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 09:07:34.889042198 +0000 UTC m=+249.942681956,LastTimestamp:2025-11-25 09:07:34.889042198 +0000 UTC m=+249.942681956,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 09:07:37 crc kubenswrapper[4687]: E1125 09:07:37.178357 4687 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.246:6443: connect: connection refused" interval="1.6s" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.742745 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.754164 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.754834 4687 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f" exitCode=0 Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.754888 4687 scope.go:117] "RemoveContainer" containerID="91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.755011 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.755601 4687 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.755790 4687 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.755960 4687 status_manager.go:851] "Failed to get status for pod" podUID="b9ef2435-1dc0-412c-b4fd-fcfd912180ec" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.773372 4687 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.773722 4687 status_manager.go:851] "Failed to get status for pod" podUID="b9ef2435-1dc0-412c-b4fd-fcfd912180ec" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.774569 4687 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.777037 4687 scope.go:117] "RemoveContainer" containerID="bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.789336 4687 scope.go:117] "RemoveContainer" containerID="34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.805494 4687 scope.go:117] "RemoveContainer" containerID="d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.823838 4687 scope.go:117] "RemoveContainer" containerID="b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.848486 4687 scope.go:117] "RemoveContainer" containerID="32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.867648 4687 scope.go:117] "RemoveContainer" containerID="91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8" Nov 25 09:07:37 crc 
kubenswrapper[4687]: E1125 09:07:37.868028 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8\": container with ID starting with 91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8 not found: ID does not exist" containerID="91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.868062 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8"} err="failed to get container status \"91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8\": rpc error: code = NotFound desc = could not find container \"91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8\": container with ID starting with 91222e7c69fde384109dba0599b8567a76c60c73c9746abefbee65a201dbe4d8 not found: ID does not exist" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.868084 4687 scope.go:117] "RemoveContainer" containerID="bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce" Nov 25 09:07:37 crc kubenswrapper[4687]: E1125 09:07:37.868284 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\": container with ID starting with bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce not found: ID does not exist" containerID="bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.868356 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce"} err="failed to get container status \"bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\": rpc error: code = NotFound desc = could not find container \"bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce\": container with ID starting with bd13a9ed5492a12e2438b1c91646bf2c078952e69219eae4659379c055e47dce not found: ID does not exist" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.868372 4687 scope.go:117] "RemoveContainer" containerID="34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0" Nov 25 09:07:37 crc kubenswrapper[4687]: E1125 09:07:37.868643 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\": container with ID starting with 34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0 not found: ID does not exist" containerID="34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.868664 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0"} err="failed to get container status \"34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\": rpc error: code = NotFound desc = could not find container \"34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0\": container with ID starting with 34e69f72d0a0b2346c2ab52a8b3140ab37ce89051b7b217965fbd3107c74d3b0 not found: ID does not exist" Nov 25 09:07:37 crc kubenswrapper[4687]: 
I1125 09:07:37.868700 4687 scope.go:117] "RemoveContainer" containerID="d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48" Nov 25 09:07:37 crc kubenswrapper[4687]: E1125 09:07:37.869095 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\": container with ID starting with d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48 not found: ID does not exist" containerID="d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.869123 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48"} err="failed to get container status \"d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\": rpc error: code = NotFound desc = could not find container \"d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48\": container with ID starting with d66f19d79572c016a2c8f70a38491d82c4b20e9bd613768a766e17985341fe48 not found: ID does not exist" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.869140 4687 scope.go:117] "RemoveContainer" containerID="b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f" Nov 25 09:07:37 crc kubenswrapper[4687]: E1125 09:07:37.869702 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\": container with ID starting with b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f not found: ID does not exist" containerID="b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.869730 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f"} err="failed to get container status \"b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\": rpc error: code = NotFound desc = could not find container \"b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f\": container with ID starting with b22fc292918d27907e81cf72c002bc3ce7ad95dc78b3ae98c1a69932a6fff18f not found: ID does not exist" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.869901 4687 scope.go:117] "RemoveContainer" containerID="32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73" Nov 25 09:07:37 crc kubenswrapper[4687]: E1125 09:07:37.870393 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\": container with ID starting with 32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73 not found: ID does not exist" containerID="32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73" Nov 25 09:07:37 crc kubenswrapper[4687]: I1125 09:07:37.870417 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73"} err="failed to get container status \"32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\": rpc error: code = NotFound desc = could not find container \"32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73\": container 
with ID starting with 32f803afcba1e3999848901f8ed00e9162b9be217a851a72c6fa1d4c413b0b73 not found: ID does not exist" Nov 25 09:07:38 crc kubenswrapper[4687]: E1125 09:07:38.780051 4687 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.246:6443: connect: connection refused" interval="3.2s" Nov 25 09:07:41 crc kubenswrapper[4687]: I1125 09:07:41.019960 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" podUID="69faad41-a827-4fd3-b43e-036297dc2c9f" containerName="oauth-openshift" containerID="cri-o://80c6f0893dabe999197748c8e16143445b20dd3101831aae70776808e6ab35fa" gracePeriod=15 Nov 25 09:07:41 crc kubenswrapper[4687]: I1125 09:07:41.785100 4687 generic.go:334] "Generic (PLEG): container finished" podID="69faad41-a827-4fd3-b43e-036297dc2c9f" containerID="80c6f0893dabe999197748c8e16143445b20dd3101831aae70776808e6ab35fa" exitCode=0 Nov 25 09:07:41 crc kubenswrapper[4687]: I1125 09:07:41.785166 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" event={"ID":"69faad41-a827-4fd3-b43e-036297dc2c9f","Type":"ContainerDied","Data":"80c6f0893dabe999197748c8e16143445b20dd3101831aae70776808e6ab35fa"} Nov 25 09:07:41 crc kubenswrapper[4687]: E1125 09:07:41.982703 4687 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.246:6443: connect: connection refused" interval="6.4s" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.078467 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.079164 4687 status_manager.go:851] "Failed to get status for pod" podUID="69faad41-a827-4fd3-b43e-036297dc2c9f" pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-c7v75\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.079557 4687 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.080165 4687 status_manager.go:851] "Failed to get status for pod" podUID="b9ef2435-1dc0-412c-b4fd-fcfd912180ec" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.113925 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/69faad41-a827-4fd3-b43e-036297dc2c9f-audit-dir\") pod \"69faad41-a827-4fd3-b43e-036297dc2c9f\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.114043 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-cliconfig\") pod \"69faad41-a827-4fd3-b43e-036297dc2c9f\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.114116 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-template-provider-selection\") pod \"69faad41-a827-4fd3-b43e-036297dc2c9f\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.114110 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69faad41-a827-4fd3-b43e-036297dc2c9f-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "69faad41-a827-4fd3-b43e-036297dc2c9f" (UID: "69faad41-a827-4fd3-b43e-036297dc2c9f"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.114145 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-ocp-branding-template\") pod \"69faad41-a827-4fd3-b43e-036297dc2c9f\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.114193 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-idp-0-file-data\") pod \"69faad41-a827-4fd3-b43e-036297dc2c9f\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.114230 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-trusted-ca-bundle\") pod \"69faad41-a827-4fd3-b43e-036297dc2c9f\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.114282 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-router-certs\") pod \"69faad41-a827-4fd3-b43e-036297dc2c9f\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.114305 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-service-ca\") pod \"69faad41-a827-4fd3-b43e-036297dc2c9f\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.114548 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-session\") pod \"69faad41-a827-4fd3-b43e-036297dc2c9f\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.114593 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-template-error\") pod \"69faad41-a827-4fd3-b43e-036297dc2c9f\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.114618 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-template-login\") pod \"69faad41-a827-4fd3-b43e-036297dc2c9f\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.114676 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-audit-policies\") pod \"69faad41-a827-4fd3-b43e-036297dc2c9f\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " Nov 25 09:07:42 crc kubenswrapper[4687]: 
I1125 09:07:42.115515 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "69faad41-a827-4fd3-b43e-036297dc2c9f" (UID: "69faad41-a827-4fd3-b43e-036297dc2c9f"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.115664 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-smzws\" (UniqueName: \"kubernetes.io/projected/69faad41-a827-4fd3-b43e-036297dc2c9f-kube-api-access-smzws\") pod \"69faad41-a827-4fd3-b43e-036297dc2c9f\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.115726 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-serving-cert\") pod \"69faad41-a827-4fd3-b43e-036297dc2c9f\" (UID: \"69faad41-a827-4fd3-b43e-036297dc2c9f\") " Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.115709 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "69faad41-a827-4fd3-b43e-036297dc2c9f" (UID: "69faad41-a827-4fd3-b43e-036297dc2c9f"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.116219 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "69faad41-a827-4fd3-b43e-036297dc2c9f" (UID: "69faad41-a827-4fd3-b43e-036297dc2c9f"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.116485 4687 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.116522 4687 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/69faad41-a827-4fd3-b43e-036297dc2c9f-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.116535 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.116548 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.120301 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "69faad41-a827-4fd3-b43e-036297dc2c9f" (UID: "69faad41-a827-4fd3-b43e-036297dc2c9f"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.120780 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "69faad41-a827-4fd3-b43e-036297dc2c9f" (UID: "69faad41-a827-4fd3-b43e-036297dc2c9f"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.121321 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "69faad41-a827-4fd3-b43e-036297dc2c9f" (UID: "69faad41-a827-4fd3-b43e-036297dc2c9f"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.121659 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "69faad41-a827-4fd3-b43e-036297dc2c9f" (UID: "69faad41-a827-4fd3-b43e-036297dc2c9f"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.123137 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "69faad41-a827-4fd3-b43e-036297dc2c9f" (UID: "69faad41-a827-4fd3-b43e-036297dc2c9f"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.123263 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69faad41-a827-4fd3-b43e-036297dc2c9f-kube-api-access-smzws" (OuterVolumeSpecName: "kube-api-access-smzws") pod "69faad41-a827-4fd3-b43e-036297dc2c9f" (UID: "69faad41-a827-4fd3-b43e-036297dc2c9f"). InnerVolumeSpecName "kube-api-access-smzws". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.124772 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "69faad41-a827-4fd3-b43e-036297dc2c9f" (UID: "69faad41-a827-4fd3-b43e-036297dc2c9f"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.124979 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "69faad41-a827-4fd3-b43e-036297dc2c9f" (UID: "69faad41-a827-4fd3-b43e-036297dc2c9f"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.125881 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "69faad41-a827-4fd3-b43e-036297dc2c9f" (UID: "69faad41-a827-4fd3-b43e-036297dc2c9f"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.127015 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "69faad41-a827-4fd3-b43e-036297dc2c9f" (UID: "69faad41-a827-4fd3-b43e-036297dc2c9f"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.217318 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-smzws\" (UniqueName: \"kubernetes.io/projected/69faad41-a827-4fd3-b43e-036297dc2c9f-kube-api-access-smzws\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.217360 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.217380 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.217395 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.217409 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.217422 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.217435 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.217447 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.217460 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.217472 4687 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/69faad41-a827-4fd3-b43e-036297dc2c9f-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.794603 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" event={"ID":"69faad41-a827-4fd3-b43e-036297dc2c9f","Type":"ContainerDied","Data":"6adad7d5702b7f3841a544aa3960d52c0f1eb70c482375eab20326e4b9768c82"} Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.794694 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-c7v75"
Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.794714 4687 scope.go:117] "RemoveContainer" containerID="80c6f0893dabe999197748c8e16143445b20dd3101831aae70776808e6ab35fa"
Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.795372 4687 status_manager.go:851] "Failed to get status for pod" podUID="69faad41-a827-4fd3-b43e-036297dc2c9f" pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-c7v75\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.795595 4687 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.795881 4687 status_manager.go:851] "Failed to get status for pod" podUID="b9ef2435-1dc0-412c-b4fd-fcfd912180ec" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:42 crc kubenswrapper[4687]: E1125 09:07:42.811350 4687 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.102.83.246:6443: connect: connection refused" pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" volumeName="registry-storage"
Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.822888 4687 status_manager.go:851] "Failed to get status for pod" podUID="69faad41-a827-4fd3-b43e-036297dc2c9f" pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-c7v75\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.823459 4687 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:42 crc kubenswrapper[4687]: I1125 09:07:42.824053 4687 status_manager.go:851] "Failed to get status for pod" podUID="b9ef2435-1dc0-412c-b4fd-fcfd912180ec" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:45 crc kubenswrapper[4687]: I1125 09:07:45.741375 4687 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:45 crc kubenswrapper[4687]: I1125 09:07:45.743276 4687 status_manager.go:851] "Failed to get status for pod" podUID="b9ef2435-1dc0-412c-b4fd-fcfd912180ec" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:45 crc kubenswrapper[4687]: I1125 09:07:45.743813 4687 status_manager.go:851] "Failed to get status for pod" podUID="69faad41-a827-4fd3-b43e-036297dc2c9f" pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-c7v75\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:46 crc kubenswrapper[4687]: I1125 09:07:46.830935 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Nov 25 09:07:46 crc kubenswrapper[4687]: I1125 09:07:46.831179 4687 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3" exitCode=1
Nov 25 09:07:46 crc kubenswrapper[4687]: I1125 09:07:46.831206 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3"}
Nov 25 09:07:46 crc kubenswrapper[4687]: I1125 09:07:46.831687 4687 scope.go:117] "RemoveContainer" containerID="a1ce5f62cc98dedf0816990020a71f88b77995ec5476a63e9aead6caad4c33a3"
Nov 25 09:07:46 crc kubenswrapper[4687]: I1125 09:07:46.832533 4687 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:46 crc kubenswrapper[4687]: I1125 09:07:46.833020 4687 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:46 crc kubenswrapper[4687]: I1125 09:07:46.834774 4687 status_manager.go:851] "Failed to get status for pod" podUID="b9ef2435-1dc0-412c-b4fd-fcfd912180ec" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:46 crc kubenswrapper[4687]: I1125 09:07:46.835132 4687 status_manager.go:851] "Failed to get status for pod" podUID="69faad41-a827-4fd3-b43e-036297dc2c9f" pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-c7v75\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:47 crc kubenswrapper[4687]: E1125 09:07:47.107443 4687 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.246:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b34ba8d8c9116 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 09:07:34.889042198 +0000 UTC m=+249.942681956,LastTimestamp:2025-11-25 09:07:34.889042198 +0000 UTC m=+249.942681956,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 25 09:07:47 crc kubenswrapper[4687]: I1125 09:07:47.734353 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:07:47 crc kubenswrapper[4687]: I1125 09:07:47.735434 4687 status_manager.go:851] "Failed to get status for pod" podUID="69faad41-a827-4fd3-b43e-036297dc2c9f" pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-c7v75\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:47 crc kubenswrapper[4687]: I1125 09:07:47.736021 4687 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:47 crc kubenswrapper[4687]: I1125 09:07:47.736529 4687 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:47 crc kubenswrapper[4687]: I1125 09:07:47.736859 4687 status_manager.go:851] "Failed to get status for pod" podUID="b9ef2435-1dc0-412c-b4fd-fcfd912180ec" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:47 crc kubenswrapper[4687]: I1125 09:07:47.749864 4687 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a"
Nov 25 09:07:47 crc kubenswrapper[4687]: I1125 09:07:47.750096 4687 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a"
Nov 25 09:07:47 crc kubenswrapper[4687]: E1125 09:07:47.750970 4687 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:07:47 crc kubenswrapper[4687]: I1125 09:07:47.751702 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:07:47 crc kubenswrapper[4687]: I1125 09:07:47.843328 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Nov 25 09:07:47 crc kubenswrapper[4687]: I1125 09:07:47.843434 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f518d546617035718e4bdc0cc15b8b1aeb6ccb48a3b69a7d45fea58fabfecc70"}
Nov 25 09:07:47 crc kubenswrapper[4687]: I1125 09:07:47.844449 4687 status_manager.go:851] "Failed to get status for pod" podUID="69faad41-a827-4fd3-b43e-036297dc2c9f" pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-c7v75\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:47 crc kubenswrapper[4687]: I1125 09:07:47.844719 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"fd61aa18133c4ae90b4ce4ca301e211c058501ef1339b017888854d1051580cf"}
Nov 25 09:07:47 crc kubenswrapper[4687]: I1125 09:07:47.845021 4687 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:47 crc kubenswrapper[4687]: I1125 09:07:47.845439 4687 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:47 crc kubenswrapper[4687]: I1125 09:07:47.845909 4687 status_manager.go:851] "Failed to get status for pod" podUID="b9ef2435-1dc0-412c-b4fd-fcfd912180ec" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:48 crc kubenswrapper[4687]: E1125 09:07:48.083906 4687 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-conmon-266caf8695886dc05e11c37df144b10e4f2551c45579038d917e971b4fac4e18.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-266caf8695886dc05e11c37df144b10e4f2551c45579038d917e971b4fac4e18.scope\": RecentStats: unable to find data in memory cache]"
Nov 25 09:07:48 crc kubenswrapper[4687]: E1125 09:07:48.384865 4687 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.246:6443: connect: connection refused" interval="7s"
Nov 25 09:07:48 crc kubenswrapper[4687]: I1125 09:07:48.852303 4687 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="266caf8695886dc05e11c37df144b10e4f2551c45579038d917e971b4fac4e18" exitCode=0
Nov 25 09:07:48 crc kubenswrapper[4687]: I1125 09:07:48.852352 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"266caf8695886dc05e11c37df144b10e4f2551c45579038d917e971b4fac4e18"}
Nov 25 09:07:48 crc kubenswrapper[4687]: I1125 09:07:48.852868 4687 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a"
Nov 25 09:07:48 crc kubenswrapper[4687]: I1125 09:07:48.852902 4687 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a"
Nov 25 09:07:48 crc kubenswrapper[4687]: I1125 09:07:48.853190 4687 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:48 crc kubenswrapper[4687]: E1125 09:07:48.853413 4687 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.246:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:07:48 crc kubenswrapper[4687]: I1125 09:07:48.853484 4687 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:48 crc kubenswrapper[4687]: I1125 09:07:48.853708 4687 status_manager.go:851] "Failed to get status for pod" podUID="b9ef2435-1dc0-412c-b4fd-fcfd912180ec" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:48 crc kubenswrapper[4687]: I1125 09:07:48.853891 4687 status_manager.go:851] "Failed to get status for pod" podUID="69faad41-a827-4fd3-b43e-036297dc2c9f" pod="openshift-authentication/oauth-openshift-558db77b4-c7v75" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-c7v75\": dial tcp 38.102.83.246:6443: connect: connection refused"
Nov 25 09:07:49 crc kubenswrapper[4687]: I1125 09:07:49.865936 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"08866cb0e9eb83f6f76ed51f9aa0b4cdc7417708ada200fe1dad5345fc0bd2ab"}
Nov 25 09:07:49 crc kubenswrapper[4687]: I1125 09:07:49.865983 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"89247e2d6474a7873672da196208873dc950a6426a24bdf1c8204c5064b87c13"}
Nov 25 09:07:49 crc kubenswrapper[4687]: I1125 09:07:49.865997 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"ca4ada49367d768b7781cee3e3d06b41e1bd67d2ece44f891da2c5bc17ad9052"}
Nov 25 09:07:50 crc kubenswrapper[4687]: I1125 09:07:50.873289 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"0ce61f27fbac4151ada0c6ca5f0b55cc9008c60c80356de829161b3f637d3090"}
Nov 25 09:07:50 crc kubenswrapper[4687]: I1125 09:07:50.873331 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"3d9d6b367ce34be3470885e5051da2b206c7af67243bd9e45e419a76c11331dc"}
Nov 25 09:07:50 crc kubenswrapper[4687]: I1125 09:07:50.873478 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:07:50 crc kubenswrapper[4687]: I1125 09:07:50.873539 4687 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a"
Nov 25 09:07:50 crc kubenswrapper[4687]: I1125 09:07:50.873560 4687 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a"
Nov 25 09:07:50 crc kubenswrapper[4687]: I1125 09:07:50.937677 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 09:07:50 crc kubenswrapper[4687]: I1125 09:07:50.946102 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 09:07:51 crc kubenswrapper[4687]: I1125 09:07:51.879688 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 09:07:52 crc kubenswrapper[4687]: I1125 09:07:52.752850 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:07:52 crc kubenswrapper[4687]: I1125 09:07:52.753249 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:07:52 crc kubenswrapper[4687]: I1125 09:07:52.762311 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:07:55 crc kubenswrapper[4687]: I1125 09:07:55.885265 4687 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:07:55 crc kubenswrapper[4687]: I1125 09:07:55.906830 4687 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a"
Nov 25 09:07:55 crc kubenswrapper[4687]: I1125 09:07:55.906860 4687 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a"
Nov 25 09:07:55 crc kubenswrapper[4687]: I1125 09:07:55.911764 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:07:56 crc kubenswrapper[4687]: I1125 09:07:56.016971 4687 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="d924ea57-f9e5-4385-b375-358a0de792d5"
Nov 25 09:07:56 crc kubenswrapper[4687]: I1125 09:07:56.912709 4687 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a"
Nov 25 09:07:56 crc kubenswrapper[4687]: I1125 09:07:56.912758 4687 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a"
Nov 25 09:07:56 crc kubenswrapper[4687]: I1125 09:07:56.917254 4687 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="d924ea57-f9e5-4385-b375-358a0de792d5"
Nov 25 09:08:02 crc kubenswrapper[4687]: I1125 09:08:02.783122 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 09:08:05 crc kubenswrapper[4687]: I1125 09:08:05.617332 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Nov 25 09:08:06 crc kubenswrapper[4687]: I1125 09:08:06.180697 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Nov 25 09:08:06 crc kubenswrapper[4687]: I1125 09:08:06.285085 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Nov 25 09:08:06 crc kubenswrapper[4687]: I1125 09:08:06.538334 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Nov 25 09:08:06 crc kubenswrapper[4687]: I1125 09:08:06.785470 4687 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Nov 25 09:08:07 crc kubenswrapper[4687]: I1125 09:08:07.263556 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Nov 25 09:08:07 crc kubenswrapper[4687]: I1125 09:08:07.357625 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Nov 25 09:08:07 crc kubenswrapper[4687]: I1125 09:08:07.382325 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Nov 25 09:08:07 crc kubenswrapper[4687]: I1125 09:08:07.434660 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Nov 25 09:08:07 crc kubenswrapper[4687]: I1125 09:08:07.586968 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 25 09:08:07 crc kubenswrapper[4687]: I1125 09:08:07.653308 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Nov 25 09:08:07 crc kubenswrapper[4687]: I1125 09:08:07.692775 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Nov 25 09:08:07 crc kubenswrapper[4687]: I1125 09:08:07.799805 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Nov 25 09:08:07 crc kubenswrapper[4687]: I1125 09:08:07.842045 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Nov 25 09:08:07 crc kubenswrapper[4687]: I1125 09:08:07.916160 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Nov 25 09:08:08 crc kubenswrapper[4687]: I1125 09:08:08.030384 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Nov 25 09:08:08 crc kubenswrapper[4687]: I1125 09:08:08.174864 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Nov 25 09:08:08 crc kubenswrapper[4687]: I1125 09:08:08.339241 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Nov 25 09:08:08 crc kubenswrapper[4687]: I1125 09:08:08.460934 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Nov 25 09:08:08 crc kubenswrapper[4687]: I1125 09:08:08.490668 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Nov 25 09:08:08 crc kubenswrapper[4687]: I1125 09:08:08.513397 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 25 09:08:08 crc kubenswrapper[4687]: I1125 09:08:08.882393 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Nov 25 09:08:09 crc kubenswrapper[4687]: I1125 09:08:09.083564 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Nov 25 09:08:09 crc kubenswrapper[4687]: I1125 09:08:09.120247 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Nov 25 09:08:09 crc kubenswrapper[4687]: I1125 09:08:09.141259 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Nov 25 09:08:09 crc kubenswrapper[4687]: I1125 09:08:09.150018 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Nov 25 09:08:09 crc kubenswrapper[4687]: I1125 09:08:09.161492 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Nov 25 09:08:09 crc kubenswrapper[4687]: I1125 09:08:09.172649 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Nov 25 09:08:09 crc kubenswrapper[4687]: I1125 09:08:09.587665 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Nov 25 09:08:09 crc kubenswrapper[4687]: I1125 09:08:09.727621 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Nov 25 09:08:09 crc kubenswrapper[4687]: I1125 09:08:09.806860 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 25 09:08:09 crc kubenswrapper[4687]: I1125 09:08:09.827346 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 25 09:08:09 crc kubenswrapper[4687]: I1125 09:08:09.950162 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 25 09:08:09 crc kubenswrapper[4687]: I1125 09:08:09.956645 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Nov 25 09:08:10 crc kubenswrapper[4687]: I1125 09:08:10.126908 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Nov 25 09:08:10 crc kubenswrapper[4687]: I1125 09:08:10.155291 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Nov 25 09:08:10 crc kubenswrapper[4687]: I1125 09:08:10.255546 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Nov 25 09:08:10 crc kubenswrapper[4687]: I1125 09:08:10.372391 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 25 09:08:10 crc kubenswrapper[4687]: I1125 09:08:10.469621 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Nov 25 09:08:10 crc kubenswrapper[4687]: I1125 09:08:10.518762 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Nov 25 09:08:10 crc kubenswrapper[4687]: I1125 09:08:10.560987 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config"
Nov 25 09:08:10 crc kubenswrapper[4687]: I1125 09:08:10.582571 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Nov 25 09:08:10 crc kubenswrapper[4687]: I1125 09:08:10.618611 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Nov 25 09:08:10 crc kubenswrapper[4687]: I1125 09:08:10.618674 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Nov 25 09:08:10 crc kubenswrapper[4687]: I1125 09:08:10.657517 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Nov 25 09:08:10 crc kubenswrapper[4687]: I1125 09:08:10.743403 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Nov 25 09:08:10 crc kubenswrapper[4687]: I1125 09:08:10.743435 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 25 09:08:10 crc kubenswrapper[4687]: I1125 09:08:10.845096 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Nov 25 09:08:10 crc kubenswrapper[4687]: I1125 09:08:10.858802 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Nov 25 09:08:10 crc kubenswrapper[4687]: I1125 09:08:10.914827 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5"
Nov 25 09:08:10 crc kubenswrapper[4687]: I1125 09:08:10.948073 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Nov 25 09:08:10 crc kubenswrapper[4687]: I1125 09:08:10.959523 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Nov 25 09:08:10 crc kubenswrapper[4687]: I1125 09:08:10.970041 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Nov 25 09:08:10 crc kubenswrapper[4687]: I1125 09:08:10.974640 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Nov 25 09:08:11 crc kubenswrapper[4687]: I1125 09:08:11.033342 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Nov 25 09:08:11 crc kubenswrapper[4687]: I1125 09:08:11.037806 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 25 09:08:11 crc kubenswrapper[4687]: I1125 09:08:11.057811 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls"
Nov 25 09:08:11 crc kubenswrapper[4687]: I1125 09:08:11.132710 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Nov 25 09:08:11 crc kubenswrapper[4687]: I1125 09:08:11.137099 4687 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 25 09:08:11 crc kubenswrapper[4687]: I1125 09:08:11.162031 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Nov 25 09:08:11 crc kubenswrapper[4687]: I1125 09:08:11.190956 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Nov 25 09:08:11 crc kubenswrapper[4687]: I1125 09:08:11.201413 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Nov 25 09:08:11 crc kubenswrapper[4687]: I1125 09:08:11.228169 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Nov 25 09:08:11 crc kubenswrapper[4687]: I1125 09:08:11.230545 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Nov 25 09:08:11 crc kubenswrapper[4687]: I1125 09:08:11.239111 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Nov 25 09:08:11 crc kubenswrapper[4687]: I1125 09:08:11.403423 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Nov 25 09:08:11 crc kubenswrapper[4687]: I1125 09:08:11.667658 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Nov 25 09:08:11 crc kubenswrapper[4687]: I1125 09:08:11.687977 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Nov 25 09:08:11 crc kubenswrapper[4687]: I1125 09:08:11.776321 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Nov 25 09:08:11 crc kubenswrapper[4687]: I1125 09:08:11.831154 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Nov 25 09:08:11 crc kubenswrapper[4687]: I1125 09:08:11.840372 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Nov 25 09:08:11 crc kubenswrapper[4687]: I1125 09:08:11.880194 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.165316 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.170894 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.195921 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.286242 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.318305 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.320540 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.388754 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.396614 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.397046 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.489604 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.518461 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.552477 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.609932 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.618624 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.623073 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.653333 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.658189 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.679673 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.692468 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.704818 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.817010 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.822855 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.927477 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Nov 25 09:08:12 crc kubenswrapper[4687]: I1125 09:08:12.961812 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Nov 25 09:08:13 crc kubenswrapper[4687]: I1125 09:08:13.010693 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Nov 25 09:08:13 crc kubenswrapper[4687]: I1125 09:08:13.062594 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Nov 25 09:08:13 crc kubenswrapper[4687]: I1125 09:08:13.156922 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Nov 25 09:08:13 crc kubenswrapper[4687]: I1125 09:08:13.213031 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Nov 25 09:08:13 crc kubenswrapper[4687]: I1125 09:08:13.221311 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Nov 25 09:08:13 crc kubenswrapper[4687]: I1125 09:08:13.262585 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt"
Nov 25 09:08:13 crc kubenswrapper[4687]: I1125 09:08:13.376395 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 25 09:08:13 crc kubenswrapper[4687]: I1125 09:08:13.444075 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Nov 25 09:08:13 crc kubenswrapper[4687]: I1125 09:08:13.466096 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Nov 25 09:08:13 crc kubenswrapper[4687]: I1125 09:08:13.632374 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Nov 25 09:08:13 crc kubenswrapper[4687]: I1125 09:08:13.691937 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Nov 25 09:08:13 crc kubenswrapper[4687]: I1125 09:08:13.712783 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 25 09:08:13 crc kubenswrapper[4687]: I1125 09:08:13.743032 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Nov 25 09:08:13 crc kubenswrapper[4687]: I1125 09:08:13.798934 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Nov 25 09:08:13 crc kubenswrapper[4687]: I1125 09:08:13.941302 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.007141 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.058214 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.139630 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.174935 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.211943 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.246546 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.334770 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.335914 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.341880 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.407525 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.459857 4687 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.540597 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.564015 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.612418 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.620694 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.637733 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.782031 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.801075 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.947903 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle"
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.976661 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.981209 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt"
Nov 25 09:08:14 crc kubenswrapper[4687]: I1125 09:08:14.993009 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Nov 25 09:08:15 crc kubenswrapper[4687]: I1125 09:08:15.033617 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Nov 25 09:08:15 crc kubenswrapper[4687]: I1125 09:08:15.056601 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Nov 25 09:08:15 crc kubenswrapper[4687]: I1125 09:08:15.081039 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Nov 25 09:08:15 crc kubenswrapper[4687]: I1125 09:08:15.119231 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Nov 25 09:08:15 crc kubenswrapper[4687]: I1125 09:08:15.127220 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Nov 25 09:08:15 crc kubenswrapper[4687]: I1125 09:08:15.173568 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Nov 25 09:08:15 crc kubenswrapper[4687]: I1125 09:08:15.259282 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Nov 25 09:08:15 crc kubenswrapper[4687]: I1125 09:08:15.265869 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Nov 25 09:08:15 crc kubenswrapper[4687]: I1125 09:08:15.421982 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Nov 25 09:08:15 crc kubenswrapper[4687]: I1125 09:08:15.431570 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Nov 25 09:08:15 crc kubenswrapper[4687]: I1125 09:08:15.444103 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Nov 25 09:08:15 crc kubenswrapper[4687]: I1125 09:08:15.497914 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Nov 25 09:08:15 crc kubenswrapper[4687]: I1125 09:08:15.507825 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Nov 25 09:08:15 crc kubenswrapper[4687]: I1125 09:08:15.552080 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl"
Nov 25 09:08:15 crc kubenswrapper[4687]: I1125 09:08:15.634696 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Nov 25 09:08:15 crc kubenswrapper[4687]: I1125 09:08:15.725149 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 25 09:08:15 crc kubenswrapper[4687]: I1125 09:08:15.766604 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Nov 25 09:08:15 crc kubenswrapper[4687]: I1125 09:08:15.782471 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Nov 25 09:08:15 crc kubenswrapper[4687]: I1125 09:08:15.855421 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Nov 25 09:08:15 crc kubenswrapper[4687]: I1125 09:08:15.961803 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Nov 25 09:08:15 crc kubenswrapper[4687]: I1125 09:08:15.978894 4687 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 25 09:08:16 crc kubenswrapper[4687]: I1125 09:08:16.014106 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Nov 25 09:08:16 crc kubenswrapper[4687]: I1125 09:08:16.101077 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Nov 25 09:08:16 crc kubenswrapper[4687]: I1125 09:08:16.117380 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Nov 25 09:08:16 crc kubenswrapper[4687]: I1125 09:08:16.176022 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Nov 25 09:08:16 crc kubenswrapper[4687]: I1125 09:08:16.213736 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Nov 25 09:08:16 crc kubenswrapper[4687]: I1125 09:08:16.314142 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Nov 25 09:08:16 crc kubenswrapper[4687]: I1125 09:08:16.373130 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Nov 25 09:08:16 crc kubenswrapper[4687]: I1125 09:08:16.492682 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Nov 25 09:08:16 crc kubenswrapper[4687]: I1125 09:08:16.509429 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Nov 25 09:08:16 crc kubenswrapper[4687]: I1125 09:08:16.538379 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Nov 25 09:08:16 crc kubenswrapper[4687]: I1125 09:08:16.725570 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Nov 25 09:08:16 crc kubenswrapper[4687]: I1125 09:08:16.732435 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Nov 25 09:08:16 crc kubenswrapper[4687]: I1125 09:08:16.802682 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Nov 25 09:08:16 crc kubenswrapper[4687]: I1125 09:08:16.896730 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Nov 25 09:08:16 crc kubenswrapper[4687]: I1125 09:08:16.911842 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Nov 25 09:08:16 crc kubenswrapper[4687]: I1125 09:08:16.915358 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 25 09:08:16 crc kubenswrapper[4687]: I1125 09:08:16.967933 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 25 09:08:17 crc kubenswrapper[4687]: I1125 09:08:17.029293 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Nov 25 09:08:17 crc kubenswrapper[4687]: I1125 09:08:17.069998 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Nov 25 09:08:17 crc kubenswrapper[4687]: I1125 09:08:17.266389 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 25 09:08:17 crc kubenswrapper[4687]: I1125 09:08:17.305671 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Nov 25 09:08:17 crc kubenswrapper[4687]: I1125 09:08:17.378953 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Nov 25 09:08:17 crc kubenswrapper[4687]: I1125 09:08:17.446877 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Nov 25 09:08:17 crc kubenswrapper[4687]: I1125 09:08:17.452591 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Nov 25 09:08:17 crc kubenswrapper[4687]: I1125 09:08:17.471730 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Nov 25 09:08:17 crc kubenswrapper[4687]: I1125 09:08:17.494791 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Nov 25 09:08:17 crc kubenswrapper[4687]: I1125 09:08:17.498803 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Nov 25 09:08:17 crc kubenswrapper[4687]: I1125 09:08:17.535049 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Nov 25 09:08:17 crc kubenswrapper[4687]: I1125 09:08:17.652655 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Nov 25 09:08:17 crc kubenswrapper[4687]: I1125 09:08:17.680533 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib"
Nov 25 09:08:17 crc kubenswrapper[4687]: I1125 09:08:17.710590 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Nov 25 09:08:17 crc kubenswrapper[4687]: I1125 09:08:17.803819 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Nov 25 09:08:17 crc kubenswrapper[4687]: I1125 09:08:17.850384 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Nov 25 09:08:17 crc kubenswrapper[4687]: I1125 09:08:17.857349 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Nov 25 09:08:17 crc kubenswrapper[4687]: I1125 09:08:17.894535 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Nov 25 09:08:17 crc kubenswrapper[4687]: I1125 09:08:17.905884 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Nov 25 09:08:17 crc kubenswrapper[4687]: I1125 09:08:17.946379 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.006233 4687 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.007828 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=44.007806348 podStartE2EDuration="44.007806348s" podCreationTimestamp="2025-11-25 09:07:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:07:55.942346186 +0000 UTC m=+270.995985914" watchObservedRunningTime="2025-11-25 09:08:18.007806348 +0000 UTC m=+293.061446076"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.012223 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-558db77b4-c7v75"]
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.012295 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-6869cbc5df-ff8dk"]
Nov 25 09:08:18 crc kubenswrapper[4687]: E1125 09:08:18.012558 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69faad41-a827-4fd3-b43e-036297dc2c9f" containerName="oauth-openshift"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.012587 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="69faad41-a827-4fd3-b43e-036297dc2c9f" containerName="oauth-openshift"
Nov 25 09:08:18 crc kubenswrapper[4687]: E1125 09:08:18.012610 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9ef2435-1dc0-412c-b4fd-fcfd912180ec" containerName="installer"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.012622 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9ef2435-1dc0-412c-b4fd-fcfd912180ec" containerName="installer"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.012790 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="69faad41-a827-4fd3-b43e-036297dc2c9f" containerName="oauth-openshift"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.012807 4687 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.012834 4687 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="4cd94423-57c0-4ab8-ae45-b2bb6c3dee7a"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.012818 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9ef2435-1dc0-412c-b4fd-fcfd912180ec" containerName="installer"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.013490 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.017046 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.017238 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.017265 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.017292 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.017357 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.017435 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.017460 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.017533 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.017670 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.018300 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.018588 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.021352 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.027136 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.030485 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.032834 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.038301 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.045353 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=23.045332364 podStartE2EDuration="23.045332364s" podCreationTimestamp="2025-11-25 09:07:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:08:18.044313595 +0000 UTC m=+293.097953373" watchObservedRunningTime="2025-11-25 09:08:18.045332364 +0000 UTC m=+293.098972102"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.073293 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.082877 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.114112 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-system-router-certs\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.114220 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2zdd\" (UniqueName: \"kubernetes.io/projected/c3875200-8484-4c57-967c-239e818605fb-kube-api-access-m2zdd\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.114270 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.114309 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.114349 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.114381 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.114429 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c3875200-8484-4c57-967c-239e818605fb-audit-policies\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.114460 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.114547 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-user-template-login\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.114598 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.114635 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-system-session\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.114667 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c3875200-8484-4c57-967c-239e818605fb-audit-dir\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.114701 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-system-service-ca\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.114738 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-user-template-error\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.136705 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.179327 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.196073 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.215905 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.215952 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.215975 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk"
Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.216005 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c3875200-8484-4c57-967c-239e818605fb-audit-policies\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") "
pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.216022 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.216055 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-user-template-login\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.216078 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.216100 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-system-session\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.216120 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c3875200-8484-4c57-967c-239e818605fb-audit-dir\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.216140 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-system-service-ca\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.216157 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-user-template-error\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.216190 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-system-router-certs\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " 
pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.216212 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2zdd\" (UniqueName: \"kubernetes.io/projected/c3875200-8484-4c57-967c-239e818605fb-kube-api-access-m2zdd\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.216242 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.216574 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c3875200-8484-4c57-967c-239e818605fb-audit-dir\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.217833 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c3875200-8484-4c57-967c-239e818605fb-audit-policies\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.217956 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-system-service-ca\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.218173 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.218337 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.222264 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 
09:08:18.222273 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-system-session\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.222445 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.222664 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-user-template-login\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.223035 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-system-router-certs\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.223039 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-user-template-error\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.224353 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.224972 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/c3875200-8484-4c57-967c-239e818605fb-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.233970 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2zdd\" (UniqueName: \"kubernetes.io/projected/c3875200-8484-4c57-967c-239e818605fb-kube-api-access-m2zdd\") pod \"oauth-openshift-6869cbc5df-ff8dk\" (UID: \"c3875200-8484-4c57-967c-239e818605fb\") " pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.341795 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.438176 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.517781 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.526441 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.548835 4687 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.549054 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://88b43efbefb8d9e9d28f0f4945b7278bce0e3801ce363458013cab03fc1865aa" gracePeriod=5 Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.553808 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.554582 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.570779 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.698092 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.710767 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.747635 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6869cbc5df-ff8dk"] Nov 25 09:08:18 crc kubenswrapper[4687]: I1125 09:08:18.924558 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 25 09:08:19 crc kubenswrapper[4687]: I1125 09:08:19.006406 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 25 09:08:19 crc kubenswrapper[4687]: I1125 09:08:19.180489 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 25 09:08:19 crc kubenswrapper[4687]: I1125 09:08:19.200304 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" event={"ID":"c3875200-8484-4c57-967c-239e818605fb","Type":"ContainerStarted","Data":"a969fb73e4dbab11edb2e591808b33d58ed770edac45d2e2012c491a40481456"} Nov 25 09:08:19 crc kubenswrapper[4687]: I1125 09:08:19.200395 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" event={"ID":"c3875200-8484-4c57-967c-239e818605fb","Type":"ContainerStarted","Data":"f1a090b283dd36add886e7c9dc8c7fac3d11534090962137b3c1c606625feb54"} Nov 25 09:08:19 crc 
kubenswrapper[4687]: I1125 09:08:19.201041 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:19 crc kubenswrapper[4687]: I1125 09:08:19.260990 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 25 09:08:19 crc kubenswrapper[4687]: I1125 09:08:19.312274 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 25 09:08:19 crc kubenswrapper[4687]: I1125 09:08:19.358029 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 25 09:08:19 crc kubenswrapper[4687]: I1125 09:08:19.382906 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" Nov 25 09:08:19 crc kubenswrapper[4687]: I1125 09:08:19.392605 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 25 09:08:19 crc kubenswrapper[4687]: I1125 09:08:19.407662 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 25 09:08:19 crc kubenswrapper[4687]: I1125 09:08:19.410188 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-6869cbc5df-ff8dk" podStartSLOduration=63.410163138 podStartE2EDuration="1m3.410163138s" podCreationTimestamp="2025-11-25 09:07:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:08:19.225450292 +0000 UTC m=+294.279090040" watchObservedRunningTime="2025-11-25 09:08:19.410163138 +0000 UTC m=+294.463802856" Nov 25 09:08:19 crc kubenswrapper[4687]: I1125 09:08:19.597709 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 25 09:08:19 crc kubenswrapper[4687]: I1125 09:08:19.643626 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 25 09:08:19 crc kubenswrapper[4687]: I1125 09:08:19.695145 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 25 09:08:19 crc kubenswrapper[4687]: I1125 09:08:19.736059 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 25 09:08:19 crc kubenswrapper[4687]: I1125 09:08:19.742485 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69faad41-a827-4fd3-b43e-036297dc2c9f" path="/var/lib/kubelet/pods/69faad41-a827-4fd3-b43e-036297dc2c9f/volumes" Nov 25 09:08:19 crc kubenswrapper[4687]: I1125 09:08:19.750746 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 25 09:08:19 crc kubenswrapper[4687]: I1125 09:08:19.754427 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 25 09:08:19 crc kubenswrapper[4687]: I1125 09:08:19.852749 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 25 09:08:20 crc kubenswrapper[4687]: 
I1125 09:08:20.408213 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 25 09:08:20 crc kubenswrapper[4687]: I1125 09:08:20.434059 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 25 09:08:20 crc kubenswrapper[4687]: I1125 09:08:20.484756 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 25 09:08:20 crc kubenswrapper[4687]: I1125 09:08:20.490049 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 09:08:20 crc kubenswrapper[4687]: I1125 09:08:20.527770 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 25 09:08:20 crc kubenswrapper[4687]: I1125 09:08:20.544000 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 25 09:08:20 crc kubenswrapper[4687]: I1125 09:08:20.731381 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 25 09:08:20 crc kubenswrapper[4687]: I1125 09:08:20.738558 4687 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 25 09:08:20 crc kubenswrapper[4687]: I1125 09:08:20.908609 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 25 09:08:21 crc kubenswrapper[4687]: I1125 09:08:21.149859 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 25 09:08:21 crc kubenswrapper[4687]: I1125 09:08:21.160238 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 09:08:21 crc kubenswrapper[4687]: I1125 09:08:21.189463 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 25 09:08:21 crc kubenswrapper[4687]: I1125 09:08:21.301296 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 25 09:08:21 crc kubenswrapper[4687]: I1125 09:08:21.381760 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 25 09:08:21 crc kubenswrapper[4687]: I1125 09:08:21.426458 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 25 09:08:21 crc kubenswrapper[4687]: I1125 09:08:21.719951 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 25 09:08:22 crc kubenswrapper[4687]: I1125 09:08:22.439227 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.118147 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.118244 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.209475 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.209581 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.209605 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.209641 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.209675 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.209766 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.209869 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.209850 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.209895 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.210587 4687 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.210616 4687 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.210632 4687 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.210643 4687 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.218382 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.240065 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.240159 4687 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="88b43efbefb8d9e9d28f0f4945b7278bce0e3801ce363458013cab03fc1865aa" exitCode=137 Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.240220 4687 scope.go:117] "RemoveContainer" containerID="88b43efbefb8d9e9d28f0f4945b7278bce0e3801ce363458013cab03fc1865aa" Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.240265 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.266719 4687 scope.go:117] "RemoveContainer" containerID="88b43efbefb8d9e9d28f0f4945b7278bce0e3801ce363458013cab03fc1865aa" Nov 25 09:08:24 crc kubenswrapper[4687]: E1125 09:08:24.267383 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88b43efbefb8d9e9d28f0f4945b7278bce0e3801ce363458013cab03fc1865aa\": container with ID starting with 88b43efbefb8d9e9d28f0f4945b7278bce0e3801ce363458013cab03fc1865aa not found: ID does not exist" containerID="88b43efbefb8d9e9d28f0f4945b7278bce0e3801ce363458013cab03fc1865aa" Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.267566 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88b43efbefb8d9e9d28f0f4945b7278bce0e3801ce363458013cab03fc1865aa"} err="failed to get container status \"88b43efbefb8d9e9d28f0f4945b7278bce0e3801ce363458013cab03fc1865aa\": rpc error: code = NotFound desc = could not find container \"88b43efbefb8d9e9d28f0f4945b7278bce0e3801ce363458013cab03fc1865aa\": container with ID starting with 88b43efbefb8d9e9d28f0f4945b7278bce0e3801ce363458013cab03fc1865aa not found: ID does not exist" Nov 25 09:08:24 crc kubenswrapper[4687]: I1125 09:08:24.312919 4687 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:25 crc kubenswrapper[4687]: I1125 09:08:25.742190 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 25 09:08:25 crc kubenswrapper[4687]: I1125 09:08:25.742791 4687 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Nov 25 09:08:25 crc kubenswrapper[4687]: I1125 09:08:25.770820 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 09:08:25 crc kubenswrapper[4687]: I1125 09:08:25.770873 4687 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="5d535618-656f-474b-8f65-50997bc41d75" Nov 25 09:08:25 crc kubenswrapper[4687]: I1125 09:08:25.776183 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 09:08:25 crc kubenswrapper[4687]: I1125 09:08:25.776377 4687 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="5d535618-656f-474b-8f65-50997bc41d75" Nov 25 09:08:41 crc kubenswrapper[4687]: I1125 09:08:41.860769 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2qjbv"] Nov 25 09:08:41 crc kubenswrapper[4687]: I1125 09:08:41.861517 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" podUID="e8238e53-faf8-4dc1-a726-76368f0319be" containerName="controller-manager" containerID="cri-o://0ea82645e2ce7736fbe36319f6d2b6cc5f765d2635c42e6900ce0c6232fa07a0" gracePeriod=30 Nov 25 09:08:41 crc kubenswrapper[4687]: I1125 09:08:41.965900 
4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf"] Nov 25 09:08:41 crc kubenswrapper[4687]: I1125 09:08:41.966455 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf" podUID="0a45ab31-45db-4069-8da2-4c53cd2689ca" containerName="route-controller-manager" containerID="cri-o://ca13687ccde42f08ef64b146be839dfd60f53ef94ba6c6a0e0c513ae5a5c322a" gracePeriod=30 Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.226785 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.344019 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.344429 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e8238e53-faf8-4dc1-a726-76368f0319be-serving-cert\") pod \"e8238e53-faf8-4dc1-a726-76368f0319be\" (UID: \"e8238e53-faf8-4dc1-a726-76368f0319be\") " Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.344484 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jc7gm\" (UniqueName: \"kubernetes.io/projected/e8238e53-faf8-4dc1-a726-76368f0319be-kube-api-access-jc7gm\") pod \"e8238e53-faf8-4dc1-a726-76368f0319be\" (UID: \"e8238e53-faf8-4dc1-a726-76368f0319be\") " Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.344549 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e8238e53-faf8-4dc1-a726-76368f0319be-proxy-ca-bundles\") pod \"e8238e53-faf8-4dc1-a726-76368f0319be\" (UID: \"e8238e53-faf8-4dc1-a726-76368f0319be\") " Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.344630 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e8238e53-faf8-4dc1-a726-76368f0319be-client-ca\") pod \"e8238e53-faf8-4dc1-a726-76368f0319be\" (UID: \"e8238e53-faf8-4dc1-a726-76368f0319be\") " Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.344665 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e8238e53-faf8-4dc1-a726-76368f0319be-config\") pod \"e8238e53-faf8-4dc1-a726-76368f0319be\" (UID: \"e8238e53-faf8-4dc1-a726-76368f0319be\") " Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.345681 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8238e53-faf8-4dc1-a726-76368f0319be-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "e8238e53-faf8-4dc1-a726-76368f0319be" (UID: "e8238e53-faf8-4dc1-a726-76368f0319be"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.345790 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8238e53-faf8-4dc1-a726-76368f0319be-config" (OuterVolumeSpecName: "config") pod "e8238e53-faf8-4dc1-a726-76368f0319be" (UID: "e8238e53-faf8-4dc1-a726-76368f0319be"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.346135 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8238e53-faf8-4dc1-a726-76368f0319be-client-ca" (OuterVolumeSpecName: "client-ca") pod "e8238e53-faf8-4dc1-a726-76368f0319be" (UID: "e8238e53-faf8-4dc1-a726-76368f0319be"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.350643 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8238e53-faf8-4dc1-a726-76368f0319be-kube-api-access-jc7gm" (OuterVolumeSpecName: "kube-api-access-jc7gm") pod "e8238e53-faf8-4dc1-a726-76368f0319be" (UID: "e8238e53-faf8-4dc1-a726-76368f0319be"). InnerVolumeSpecName "kube-api-access-jc7gm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.357350 4687 generic.go:334] "Generic (PLEG): container finished" podID="e8238e53-faf8-4dc1-a726-76368f0319be" containerID="0ea82645e2ce7736fbe36319f6d2b6cc5f765d2635c42e6900ce0c6232fa07a0" exitCode=0 Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.357417 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.357421 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" event={"ID":"e8238e53-faf8-4dc1-a726-76368f0319be","Type":"ContainerDied","Data":"0ea82645e2ce7736fbe36319f6d2b6cc5f765d2635c42e6900ce0c6232fa07a0"} Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.357563 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2qjbv" event={"ID":"e8238e53-faf8-4dc1-a726-76368f0319be","Type":"ContainerDied","Data":"222efd062506b9f3ecc0569190926354a4c20ed7a93ea82673be84e3330a2b15"} Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.357603 4687 scope.go:117] "RemoveContainer" containerID="0ea82645e2ce7736fbe36319f6d2b6cc5f765d2635c42e6900ce0c6232fa07a0" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.361822 4687 generic.go:334] "Generic (PLEG): container finished" podID="0a45ab31-45db-4069-8da2-4c53cd2689ca" containerID="ca13687ccde42f08ef64b146be839dfd60f53ef94ba6c6a0e0c513ae5a5c322a" exitCode=0 Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.361849 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf" event={"ID":"0a45ab31-45db-4069-8da2-4c53cd2689ca","Type":"ContainerDied","Data":"ca13687ccde42f08ef64b146be839dfd60f53ef94ba6c6a0e0c513ae5a5c322a"} Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.361878 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf" event={"ID":"0a45ab31-45db-4069-8da2-4c53cd2689ca","Type":"ContainerDied","Data":"8e8dded4dbb95ac4bb3f08651683e96a690616c2cc6435bc966e386b7329819b"} Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.361932 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.369371 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8238e53-faf8-4dc1-a726-76368f0319be-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e8238e53-faf8-4dc1-a726-76368f0319be" (UID: "e8238e53-faf8-4dc1-a726-76368f0319be"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.389601 4687 scope.go:117] "RemoveContainer" containerID="0ea82645e2ce7736fbe36319f6d2b6cc5f765d2635c42e6900ce0c6232fa07a0" Nov 25 09:08:42 crc kubenswrapper[4687]: E1125 09:08:42.390184 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ea82645e2ce7736fbe36319f6d2b6cc5f765d2635c42e6900ce0c6232fa07a0\": container with ID starting with 0ea82645e2ce7736fbe36319f6d2b6cc5f765d2635c42e6900ce0c6232fa07a0 not found: ID does not exist" containerID="0ea82645e2ce7736fbe36319f6d2b6cc5f765d2635c42e6900ce0c6232fa07a0" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.390222 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ea82645e2ce7736fbe36319f6d2b6cc5f765d2635c42e6900ce0c6232fa07a0"} err="failed to get container status \"0ea82645e2ce7736fbe36319f6d2b6cc5f765d2635c42e6900ce0c6232fa07a0\": rpc error: code = NotFound desc = could not find container \"0ea82645e2ce7736fbe36319f6d2b6cc5f765d2635c42e6900ce0c6232fa07a0\": container with ID starting with 0ea82645e2ce7736fbe36319f6d2b6cc5f765d2635c42e6900ce0c6232fa07a0 not found: ID does not exist" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.390247 4687 scope.go:117] "RemoveContainer" containerID="ca13687ccde42f08ef64b146be839dfd60f53ef94ba6c6a0e0c513ae5a5c322a" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.411365 4687 scope.go:117] "RemoveContainer" containerID="ca13687ccde42f08ef64b146be839dfd60f53ef94ba6c6a0e0c513ae5a5c322a" Nov 25 09:08:42 crc kubenswrapper[4687]: E1125 09:08:42.411874 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca13687ccde42f08ef64b146be839dfd60f53ef94ba6c6a0e0c513ae5a5c322a\": container with ID starting with ca13687ccde42f08ef64b146be839dfd60f53ef94ba6c6a0e0c513ae5a5c322a not found: ID does not exist" containerID="ca13687ccde42f08ef64b146be839dfd60f53ef94ba6c6a0e0c513ae5a5c322a" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.411942 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca13687ccde42f08ef64b146be839dfd60f53ef94ba6c6a0e0c513ae5a5c322a"} err="failed to get container status \"ca13687ccde42f08ef64b146be839dfd60f53ef94ba6c6a0e0c513ae5a5c322a\": rpc error: code = NotFound desc = could not find container \"ca13687ccde42f08ef64b146be839dfd60f53ef94ba6c6a0e0c513ae5a5c322a\": container with ID starting with ca13687ccde42f08ef64b146be839dfd60f53ef94ba6c6a0e0c513ae5a5c322a not found: ID does not exist" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.445777 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0a45ab31-45db-4069-8da2-4c53cd2689ca-client-ca\") pod \"0a45ab31-45db-4069-8da2-4c53cd2689ca\" (UID: \"0a45ab31-45db-4069-8da2-4c53cd2689ca\") " Nov 25 09:08:42 crc 
kubenswrapper[4687]: I1125 09:08:42.446689 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sprbc\" (UniqueName: \"kubernetes.io/projected/0a45ab31-45db-4069-8da2-4c53cd2689ca-kube-api-access-sprbc\") pod \"0a45ab31-45db-4069-8da2-4c53cd2689ca\" (UID: \"0a45ab31-45db-4069-8da2-4c53cd2689ca\") " Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.446636 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a45ab31-45db-4069-8da2-4c53cd2689ca-client-ca" (OuterVolumeSpecName: "client-ca") pod "0a45ab31-45db-4069-8da2-4c53cd2689ca" (UID: "0a45ab31-45db-4069-8da2-4c53cd2689ca"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.447105 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a45ab31-45db-4069-8da2-4c53cd2689ca-config\") pod \"0a45ab31-45db-4069-8da2-4c53cd2689ca\" (UID: \"0a45ab31-45db-4069-8da2-4c53cd2689ca\") " Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.447168 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0a45ab31-45db-4069-8da2-4c53cd2689ca-serving-cert\") pod \"0a45ab31-45db-4069-8da2-4c53cd2689ca\" (UID: \"0a45ab31-45db-4069-8da2-4c53cd2689ca\") " Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.447552 4687 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e8238e53-faf8-4dc1-a726-76368f0319be-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.447570 4687 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0a45ab31-45db-4069-8da2-4c53cd2689ca-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.447593 4687 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e8238e53-faf8-4dc1-a726-76368f0319be-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.447606 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e8238e53-faf8-4dc1-a726-76368f0319be-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.447615 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e8238e53-faf8-4dc1-a726-76368f0319be-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.447623 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jc7gm\" (UniqueName: \"kubernetes.io/projected/e8238e53-faf8-4dc1-a726-76368f0319be-kube-api-access-jc7gm\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.447620 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a45ab31-45db-4069-8da2-4c53cd2689ca-config" (OuterVolumeSpecName: "config") pod "0a45ab31-45db-4069-8da2-4c53cd2689ca" (UID: "0a45ab31-45db-4069-8da2-4c53cd2689ca"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.449898 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a45ab31-45db-4069-8da2-4c53cd2689ca-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0a45ab31-45db-4069-8da2-4c53cd2689ca" (UID: "0a45ab31-45db-4069-8da2-4c53cd2689ca"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.449919 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a45ab31-45db-4069-8da2-4c53cd2689ca-kube-api-access-sprbc" (OuterVolumeSpecName: "kube-api-access-sprbc") pod "0a45ab31-45db-4069-8da2-4c53cd2689ca" (UID: "0a45ab31-45db-4069-8da2-4c53cd2689ca"). InnerVolumeSpecName "kube-api-access-sprbc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.548862 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a45ab31-45db-4069-8da2-4c53cd2689ca-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.548900 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0a45ab31-45db-4069-8da2-4c53cd2689ca-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.548910 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sprbc\" (UniqueName: \"kubernetes.io/projected/0a45ab31-45db-4069-8da2-4c53cd2689ca-kube-api-access-sprbc\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.703975 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2qjbv"] Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.710559 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2qjbv"] Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.714553 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf"] Nov 25 09:08:42 crc kubenswrapper[4687]: I1125 09:08:42.718176 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-hqtxf"] Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.741910 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a45ab31-45db-4069-8da2-4c53cd2689ca" path="/var/lib/kubelet/pods/0a45ab31-45db-4069-8da2-4c53cd2689ca/volumes" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.742980 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8238e53-faf8-4dc1-a726-76368f0319be" path="/var/lib/kubelet/pods/e8238e53-faf8-4dc1-a726-76368f0319be/volumes" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.857073 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6588b54b8f-g898x"] Nov 25 09:08:43 crc kubenswrapper[4687]: E1125 09:08:43.857452 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8238e53-faf8-4dc1-a726-76368f0319be" containerName="controller-manager" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.857478 4687 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="e8238e53-faf8-4dc1-a726-76368f0319be" containerName="controller-manager" Nov 25 09:08:43 crc kubenswrapper[4687]: E1125 09:08:43.857530 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a45ab31-45db-4069-8da2-4c53cd2689ca" containerName="route-controller-manager" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.857552 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a45ab31-45db-4069-8da2-4c53cd2689ca" containerName="route-controller-manager" Nov 25 09:08:43 crc kubenswrapper[4687]: E1125 09:08:43.857603 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.857616 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.857784 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.857803 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a45ab31-45db-4069-8da2-4c53cd2689ca" containerName="route-controller-manager" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.857821 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8238e53-faf8-4dc1-a726-76368f0319be" containerName="controller-manager" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.858453 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6588b54b8f-g898x" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.862539 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.862874 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.863017 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.863065 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.863277 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d"] Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.863898 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.864431 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.865845 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.871616 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.871802 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.871835 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.872089 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.872251 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.872406 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.879918 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.885635 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d"] Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.894559 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6588b54b8f-g898x"] Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.971610 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-proxy-ca-bundles\") pod \"controller-manager-6588b54b8f-g898x\" (UID: \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\") " pod="openshift-controller-manager/controller-manager-6588b54b8f-g898x" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.971657 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a346a690-3737-4ddb-bbd8-6bc270ce92d9-serving-cert\") pod \"route-controller-manager-7887f885ff-lvm4d\" (UID: \"a346a690-3737-4ddb-bbd8-6bc270ce92d9\") " pod="openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.971692 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8dfc\" (UniqueName: \"kubernetes.io/projected/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-kube-api-access-m8dfc\") pod \"controller-manager-6588b54b8f-g898x\" (UID: \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\") " pod="openshift-controller-manager/controller-manager-6588b54b8f-g898x" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.971725 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njxqv\" (UniqueName: \"kubernetes.io/projected/a346a690-3737-4ddb-bbd8-6bc270ce92d9-kube-api-access-njxqv\") pod \"route-controller-manager-7887f885ff-lvm4d\" (UID: 
\"a346a690-3737-4ddb-bbd8-6bc270ce92d9\") " pod="openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.971748 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-config\") pod \"controller-manager-6588b54b8f-g898x\" (UID: \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\") " pod="openshift-controller-manager/controller-manager-6588b54b8f-g898x" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.971772 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a346a690-3737-4ddb-bbd8-6bc270ce92d9-config\") pod \"route-controller-manager-7887f885ff-lvm4d\" (UID: \"a346a690-3737-4ddb-bbd8-6bc270ce92d9\") " pod="openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.971793 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a346a690-3737-4ddb-bbd8-6bc270ce92d9-client-ca\") pod \"route-controller-manager-7887f885ff-lvm4d\" (UID: \"a346a690-3737-4ddb-bbd8-6bc270ce92d9\") " pod="openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.971814 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-serving-cert\") pod \"controller-manager-6588b54b8f-g898x\" (UID: \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\") " pod="openshift-controller-manager/controller-manager-6588b54b8f-g898x" Nov 25 09:08:43 crc kubenswrapper[4687]: I1125 09:08:43.971835 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-client-ca\") pod \"controller-manager-6588b54b8f-g898x\" (UID: \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\") " pod="openshift-controller-manager/controller-manager-6588b54b8f-g898x" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.027490 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6588b54b8f-g898x"] Nov 25 09:08:44 crc kubenswrapper[4687]: E1125 09:08:44.027970 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[client-ca config kube-api-access-m8dfc proxy-ca-bundles serving-cert], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-controller-manager/controller-manager-6588b54b8f-g898x" podUID="f66dcad0-2928-46cb-9f72-53d96d4a3bf8" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.039621 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d"] Nov 25 09:08:44 crc kubenswrapper[4687]: E1125 09:08:44.039999 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[client-ca config kube-api-access-njxqv serving-cert], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d" podUID="a346a690-3737-4ddb-bbd8-6bc270ce92d9" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 
09:08:44.072546 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a346a690-3737-4ddb-bbd8-6bc270ce92d9-config\") pod \"route-controller-manager-7887f885ff-lvm4d\" (UID: \"a346a690-3737-4ddb-bbd8-6bc270ce92d9\") " pod="openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.072599 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a346a690-3737-4ddb-bbd8-6bc270ce92d9-client-ca\") pod \"route-controller-manager-7887f885ff-lvm4d\" (UID: \"a346a690-3737-4ddb-bbd8-6bc270ce92d9\") " pod="openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.072620 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-serving-cert\") pod \"controller-manager-6588b54b8f-g898x\" (UID: \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\") " pod="openshift-controller-manager/controller-manager-6588b54b8f-g898x" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.072639 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-client-ca\") pod \"controller-manager-6588b54b8f-g898x\" (UID: \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\") " pod="openshift-controller-manager/controller-manager-6588b54b8f-g898x" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.072671 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-proxy-ca-bundles\") pod \"controller-manager-6588b54b8f-g898x\" (UID: \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\") " pod="openshift-controller-manager/controller-manager-6588b54b8f-g898x" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.072692 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a346a690-3737-4ddb-bbd8-6bc270ce92d9-serving-cert\") pod \"route-controller-manager-7887f885ff-lvm4d\" (UID: \"a346a690-3737-4ddb-bbd8-6bc270ce92d9\") " pod="openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.072715 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8dfc\" (UniqueName: \"kubernetes.io/projected/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-kube-api-access-m8dfc\") pod \"controller-manager-6588b54b8f-g898x\" (UID: \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\") " pod="openshift-controller-manager/controller-manager-6588b54b8f-g898x" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.072744 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njxqv\" (UniqueName: \"kubernetes.io/projected/a346a690-3737-4ddb-bbd8-6bc270ce92d9-kube-api-access-njxqv\") pod \"route-controller-manager-7887f885ff-lvm4d\" (UID: \"a346a690-3737-4ddb-bbd8-6bc270ce92d9\") " pod="openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.072765 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-config\") pod \"controller-manager-6588b54b8f-g898x\" (UID: \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\") " pod="openshift-controller-manager/controller-manager-6588b54b8f-g898x" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.074071 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-config\") pod \"controller-manager-6588b54b8f-g898x\" (UID: \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\") " pod="openshift-controller-manager/controller-manager-6588b54b8f-g898x" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.074920 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a346a690-3737-4ddb-bbd8-6bc270ce92d9-config\") pod \"route-controller-manager-7887f885ff-lvm4d\" (UID: \"a346a690-3737-4ddb-bbd8-6bc270ce92d9\") " pod="openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.075831 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a346a690-3737-4ddb-bbd8-6bc270ce92d9-client-ca\") pod \"route-controller-manager-7887f885ff-lvm4d\" (UID: \"a346a690-3737-4ddb-bbd8-6bc270ce92d9\") " pod="openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.077290 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-client-ca\") pod \"controller-manager-6588b54b8f-g898x\" (UID: \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\") " pod="openshift-controller-manager/controller-manager-6588b54b8f-g898x" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.077602 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-proxy-ca-bundles\") pod \"controller-manager-6588b54b8f-g898x\" (UID: \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\") " pod="openshift-controller-manager/controller-manager-6588b54b8f-g898x" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.081885 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-serving-cert\") pod \"controller-manager-6588b54b8f-g898x\" (UID: \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\") " pod="openshift-controller-manager/controller-manager-6588b54b8f-g898x" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.081889 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a346a690-3737-4ddb-bbd8-6bc270ce92d9-serving-cert\") pod \"route-controller-manager-7887f885ff-lvm4d\" (UID: \"a346a690-3737-4ddb-bbd8-6bc270ce92d9\") " pod="openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.094393 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njxqv\" (UniqueName: \"kubernetes.io/projected/a346a690-3737-4ddb-bbd8-6bc270ce92d9-kube-api-access-njxqv\") pod \"route-controller-manager-7887f885ff-lvm4d\" (UID: \"a346a690-3737-4ddb-bbd8-6bc270ce92d9\") " 
pod="openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.095319 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8dfc\" (UniqueName: \"kubernetes.io/projected/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-kube-api-access-m8dfc\") pod \"controller-manager-6588b54b8f-g898x\" (UID: \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\") " pod="openshift-controller-manager/controller-manager-6588b54b8f-g898x" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.375237 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6588b54b8f-g898x" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.375277 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.387313 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6588b54b8f-g898x" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.400672 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.479063 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-serving-cert\") pod \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\" (UID: \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\") " Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.479337 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-njxqv\" (UniqueName: \"kubernetes.io/projected/a346a690-3737-4ddb-bbd8-6bc270ce92d9-kube-api-access-njxqv\") pod \"a346a690-3737-4ddb-bbd8-6bc270ce92d9\" (UID: \"a346a690-3737-4ddb-bbd8-6bc270ce92d9\") " Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.479494 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a346a690-3737-4ddb-bbd8-6bc270ce92d9-serving-cert\") pod \"a346a690-3737-4ddb-bbd8-6bc270ce92d9\" (UID: \"a346a690-3737-4ddb-bbd8-6bc270ce92d9\") " Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.479611 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-config\") pod \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\" (UID: \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\") " Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.479720 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-proxy-ca-bundles\") pod \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\" (UID: \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\") " Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.479810 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a346a690-3737-4ddb-bbd8-6bc270ce92d9-config\") pod \"a346a690-3737-4ddb-bbd8-6bc270ce92d9\" (UID: \"a346a690-3737-4ddb-bbd8-6bc270ce92d9\") " Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.479935 4687 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m8dfc\" (UniqueName: \"kubernetes.io/projected/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-kube-api-access-m8dfc\") pod \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\" (UID: \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\") " Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.480084 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a346a690-3737-4ddb-bbd8-6bc270ce92d9-client-ca\") pod \"a346a690-3737-4ddb-bbd8-6bc270ce92d9\" (UID: \"a346a690-3737-4ddb-bbd8-6bc270ce92d9\") " Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.480220 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-client-ca\") pod \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\" (UID: \"f66dcad0-2928-46cb-9f72-53d96d4a3bf8\") " Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.480401 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a346a690-3737-4ddb-bbd8-6bc270ce92d9-config" (OuterVolumeSpecName: "config") pod "a346a690-3737-4ddb-bbd8-6bc270ce92d9" (UID: "a346a690-3737-4ddb-bbd8-6bc270ce92d9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.480476 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a346a690-3737-4ddb-bbd8-6bc270ce92d9-client-ca" (OuterVolumeSpecName: "client-ca") pod "a346a690-3737-4ddb-bbd8-6bc270ce92d9" (UID: "a346a690-3737-4ddb-bbd8-6bc270ce92d9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.480541 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "f66dcad0-2928-46cb-9f72-53d96d4a3bf8" (UID: "f66dcad0-2928-46cb-9f72-53d96d4a3bf8"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.480582 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-config" (OuterVolumeSpecName: "config") pod "f66dcad0-2928-46cb-9f72-53d96d4a3bf8" (UID: "f66dcad0-2928-46cb-9f72-53d96d4a3bf8"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.480797 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.480886 4687 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.480957 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a346a690-3737-4ddb-bbd8-6bc270ce92d9-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.481026 4687 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a346a690-3737-4ddb-bbd8-6bc270ce92d9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.480823 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-client-ca" (OuterVolumeSpecName: "client-ca") pod "f66dcad0-2928-46cb-9f72-53d96d4a3bf8" (UID: "f66dcad0-2928-46cb-9f72-53d96d4a3bf8"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.483285 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-kube-api-access-m8dfc" (OuterVolumeSpecName: "kube-api-access-m8dfc") pod "f66dcad0-2928-46cb-9f72-53d96d4a3bf8" (UID: "f66dcad0-2928-46cb-9f72-53d96d4a3bf8"). InnerVolumeSpecName "kube-api-access-m8dfc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.483990 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a346a690-3737-4ddb-bbd8-6bc270ce92d9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "a346a690-3737-4ddb-bbd8-6bc270ce92d9" (UID: "a346a690-3737-4ddb-bbd8-6bc270ce92d9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.484199 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a346a690-3737-4ddb-bbd8-6bc270ce92d9-kube-api-access-njxqv" (OuterVolumeSpecName: "kube-api-access-njxqv") pod "a346a690-3737-4ddb-bbd8-6bc270ce92d9" (UID: "a346a690-3737-4ddb-bbd8-6bc270ce92d9"). InnerVolumeSpecName "kube-api-access-njxqv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.484798 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f66dcad0-2928-46cb-9f72-53d96d4a3bf8" (UID: "f66dcad0-2928-46cb-9f72-53d96d4a3bf8"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.582544 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m8dfc\" (UniqueName: \"kubernetes.io/projected/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-kube-api-access-m8dfc\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.582795 4687 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.582873 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f66dcad0-2928-46cb-9f72-53d96d4a3bf8-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.582939 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-njxqv\" (UniqueName: \"kubernetes.io/projected/a346a690-3737-4ddb-bbd8-6bc270ce92d9-kube-api-access-njxqv\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:44 crc kubenswrapper[4687]: I1125 09:08:44.583003 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a346a690-3737-4ddb-bbd8-6bc270ce92d9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.380911 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.380911 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6588b54b8f-g898x" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.420819 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-b974966b8-wkmcm"] Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.421775 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.428567 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.429142 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.429855 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.429860 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.433399 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.434737 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.441294 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6588b54b8f-g898x"] Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.443799 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.454113 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-6588b54b8f-g898x"] Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.459923 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-b974966b8-wkmcm"] Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.475661 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d"] Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.477579 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7887f885ff-lvm4d"] Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.494718 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-client-ca\") pod \"controller-manager-b974966b8-wkmcm\" (UID: \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\") " pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.494779 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-config\") pod \"controller-manager-b974966b8-wkmcm\" (UID: \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\") " pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.494806 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-serving-cert\") pod \"controller-manager-b974966b8-wkmcm\" (UID: 
\"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\") " pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.494829 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2g4pf\" (UniqueName: \"kubernetes.io/projected/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-kube-api-access-2g4pf\") pod \"controller-manager-b974966b8-wkmcm\" (UID: \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\") " pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.494850 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-proxy-ca-bundles\") pod \"controller-manager-b974966b8-wkmcm\" (UID: \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\") " pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.596557 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-client-ca\") pod \"controller-manager-b974966b8-wkmcm\" (UID: \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\") " pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.596640 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-config\") pod \"controller-manager-b974966b8-wkmcm\" (UID: \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\") " pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.596679 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-serving-cert\") pod \"controller-manager-b974966b8-wkmcm\" (UID: \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\") " pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.596715 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2g4pf\" (UniqueName: \"kubernetes.io/projected/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-kube-api-access-2g4pf\") pod \"controller-manager-b974966b8-wkmcm\" (UID: \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\") " pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.596750 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-proxy-ca-bundles\") pod \"controller-manager-b974966b8-wkmcm\" (UID: \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\") " pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.597893 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-client-ca\") pod \"controller-manager-b974966b8-wkmcm\" (UID: \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\") " pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 
09:08:45.598389 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-config\") pod \"controller-manager-b974966b8-wkmcm\" (UID: \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\") " pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.599015 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-proxy-ca-bundles\") pod \"controller-manager-b974966b8-wkmcm\" (UID: \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\") " pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.602130 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-serving-cert\") pod \"controller-manager-b974966b8-wkmcm\" (UID: \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\") " pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.617239 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2g4pf\" (UniqueName: \"kubernetes.io/projected/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-kube-api-access-2g4pf\") pod \"controller-manager-b974966b8-wkmcm\" (UID: \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\") " pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.741556 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a346a690-3737-4ddb-bbd8-6bc270ce92d9" path="/var/lib/kubelet/pods/a346a690-3737-4ddb-bbd8-6bc270ce92d9/volumes" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.741963 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f66dcad0-2928-46cb-9f72-53d96d4a3bf8" path="/var/lib/kubelet/pods/f66dcad0-2928-46cb-9f72-53d96d4a3bf8/volumes" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.748882 4687 util.go:30] "No sandbox for pod can be found. 
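Need to start a new one" pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm"

The "Cleaned up orphaned pod volumes dir" entries above are the tail end of pod deletion: once every volume under /var/lib/kubelet/pods/<uid>/volumes has been torn down, the kubelet removes the now-empty directory. A simplified stdlib-only sketch of that sweep (the real kubelet also checks for lingering mounts and subpaths before deleting anything):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cleanupOrphans lists the per-pod directories under root and removes the
// volumes dir of any pod UID that is no longer active, but only once it is
// empty. Simplified relative to the kubelet's real orphan checks.
func cleanupOrphans(root string, active map[string]bool) error {
	entries, err := os.ReadDir(root)
	if err != nil {
		return err
	}
	for _, e := range entries {
		if !e.IsDir() || active[e.Name()] {
			continue
		}
		dir := filepath.Join(root, e.Name(), "volumes")
		if children, err := os.ReadDir(dir); err == nil && len(children) == 0 {
			fmt.Printf("Cleaned up orphaned pod volumes dir podUID=%q path=%q\n", e.Name(), dir)
			_ = os.Remove(dir)
		}
	}
	return nil
}

func main() {
	_ = cleanupOrphans("/var/lib/kubelet/pods", map[string]bool{})
}
```
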
Need to start a new one" pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" Nov 25 09:08:45 crc kubenswrapper[4687]: I1125 09:08:45.990921 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-b974966b8-wkmcm"] Nov 25 09:08:46 crc kubenswrapper[4687]: I1125 09:08:46.387341 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" event={"ID":"3bc5a2c1-0177-46ff-b1ef-1090394a4afd","Type":"ContainerStarted","Data":"857e355fea6d385696835ca38ed9494b48e84dbdc8eab309c89d4750e8ee974e"} Nov 25 09:08:46 crc kubenswrapper[4687]: I1125 09:08:46.387687 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" Nov 25 09:08:46 crc kubenswrapper[4687]: I1125 09:08:46.387699 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" event={"ID":"3bc5a2c1-0177-46ff-b1ef-1090394a4afd","Type":"ContainerStarted","Data":"70a22845e81c575a2bcd515d7673c762b8c816e9b243c6363a032c67e6f570bd"} Nov 25 09:08:46 crc kubenswrapper[4687]: I1125 09:08:46.389076 4687 patch_prober.go:28] interesting pod/controller-manager-b974966b8-wkmcm container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Nov 25 09:08:46 crc kubenswrapper[4687]: I1125 09:08:46.389149 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" podUID="3bc5a2c1-0177-46ff-b1ef-1090394a4afd" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": dial tcp 10.217.0.60:8443: connect: connection refused" Nov 25 09:08:46 crc kubenswrapper[4687]: I1125 09:08:46.405080 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" podStartSLOduration=2.405063228 podStartE2EDuration="2.405063228s" podCreationTimestamp="2025-11-25 09:08:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:08:46.403452483 +0000 UTC m=+321.457092201" watchObservedRunningTime="2025-11-25 09:08:46.405063228 +0000 UTC m=+321.458702946" Nov 25 09:08:47 crc kubenswrapper[4687]: I1125 09:08:47.396081 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" Nov 25 09:08:47 crc kubenswrapper[4687]: I1125 09:08:47.862914 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r"] Nov 25 09:08:47 crc kubenswrapper[4687]: I1125 09:08:47.863652 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" Nov 25 09:08:47 crc kubenswrapper[4687]: I1125 09:08:47.866736 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 09:08:47 crc kubenswrapper[4687]: I1125 09:08:47.871031 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 09:08:47 crc kubenswrapper[4687]: I1125 09:08:47.871098 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 09:08:47 crc kubenswrapper[4687]: I1125 09:08:47.871517 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 09:08:47 crc kubenswrapper[4687]: I1125 09:08:47.871555 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 09:08:47 crc kubenswrapper[4687]: I1125 09:08:47.871709 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 09:08:47 crc kubenswrapper[4687]: I1125 09:08:47.879011 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r"] Nov 25 09:08:47 crc kubenswrapper[4687]: I1125 09:08:47.924233 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-config\") pod \"route-controller-manager-585b55f498-fqb4r\" (UID: \"7819bf5a-7449-45b1-8abe-9c25f33f0b0a\") " pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" Nov 25 09:08:47 crc kubenswrapper[4687]: I1125 09:08:47.924326 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-serving-cert\") pod \"route-controller-manager-585b55f498-fqb4r\" (UID: \"7819bf5a-7449-45b1-8abe-9c25f33f0b0a\") " pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" Nov 25 09:08:47 crc kubenswrapper[4687]: I1125 09:08:47.924394 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-client-ca\") pod \"route-controller-manager-585b55f498-fqb4r\" (UID: \"7819bf5a-7449-45b1-8abe-9c25f33f0b0a\") " pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" Nov 25 09:08:47 crc kubenswrapper[4687]: I1125 09:08:47.924460 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gsgv\" (UniqueName: \"kubernetes.io/projected/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-kube-api-access-5gsgv\") pod \"route-controller-manager-585b55f498-fqb4r\" (UID: \"7819bf5a-7449-45b1-8abe-9c25f33f0b0a\") " pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" Nov 25 09:08:48 crc kubenswrapper[4687]: I1125 09:08:48.025356 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-serving-cert\") pod 
\"route-controller-manager-585b55f498-fqb4r\" (UID: \"7819bf5a-7449-45b1-8abe-9c25f33f0b0a\") " pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" Nov 25 09:08:48 crc kubenswrapper[4687]: I1125 09:08:48.025690 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-client-ca\") pod \"route-controller-manager-585b55f498-fqb4r\" (UID: \"7819bf5a-7449-45b1-8abe-9c25f33f0b0a\") " pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" Nov 25 09:08:48 crc kubenswrapper[4687]: I1125 09:08:48.025841 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5gsgv\" (UniqueName: \"kubernetes.io/projected/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-kube-api-access-5gsgv\") pod \"route-controller-manager-585b55f498-fqb4r\" (UID: \"7819bf5a-7449-45b1-8abe-9c25f33f0b0a\") " pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" Nov 25 09:08:48 crc kubenswrapper[4687]: I1125 09:08:48.026296 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-config\") pod \"route-controller-manager-585b55f498-fqb4r\" (UID: \"7819bf5a-7449-45b1-8abe-9c25f33f0b0a\") " pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" Nov 25 09:08:48 crc kubenswrapper[4687]: I1125 09:08:48.027312 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-client-ca\") pod \"route-controller-manager-585b55f498-fqb4r\" (UID: \"7819bf5a-7449-45b1-8abe-9c25f33f0b0a\") " pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" Nov 25 09:08:48 crc kubenswrapper[4687]: I1125 09:08:48.027391 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-config\") pod \"route-controller-manager-585b55f498-fqb4r\" (UID: \"7819bf5a-7449-45b1-8abe-9c25f33f0b0a\") " pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" Nov 25 09:08:48 crc kubenswrapper[4687]: I1125 09:08:48.033944 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-serving-cert\") pod \"route-controller-manager-585b55f498-fqb4r\" (UID: \"7819bf5a-7449-45b1-8abe-9c25f33f0b0a\") " pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" Nov 25 09:08:48 crc kubenswrapper[4687]: I1125 09:08:48.047876 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5gsgv\" (UniqueName: \"kubernetes.io/projected/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-kube-api-access-5gsgv\") pod \"route-controller-manager-585b55f498-fqb4r\" (UID: \"7819bf5a-7449-45b1-8abe-9c25f33f0b0a\") " pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" Nov 25 09:08:48 crc kubenswrapper[4687]: I1125 09:08:48.185110 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" Nov 25 09:08:48 crc kubenswrapper[4687]: W1125 09:08:48.411904 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7819bf5a_7449_45b1_8abe_9c25f33f0b0a.slice/crio-2be96d7fdf2e56945b8c6e6b05bace35cc6a907b088e95bbae8865e7f0fe8a69 WatchSource:0}: Error finding container 2be96d7fdf2e56945b8c6e6b05bace35cc6a907b088e95bbae8865e7f0fe8a69: Status 404 returned error can't find the container with id 2be96d7fdf2e56945b8c6e6b05bace35cc6a907b088e95bbae8865e7f0fe8a69 Nov 25 09:08:48 crc kubenswrapper[4687]: I1125 09:08:48.411941 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r"] Nov 25 09:08:49 crc kubenswrapper[4687]: I1125 09:08:49.404790 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" event={"ID":"7819bf5a-7449-45b1-8abe-9c25f33f0b0a","Type":"ContainerStarted","Data":"49066a9f194b5b418521aa354b45342bdf723ea051c5fba4598c95a2aa609313"} Nov 25 09:08:49 crc kubenswrapper[4687]: I1125 09:08:49.405110 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" event={"ID":"7819bf5a-7449-45b1-8abe-9c25f33f0b0a","Type":"ContainerStarted","Data":"2be96d7fdf2e56945b8c6e6b05bace35cc6a907b088e95bbae8865e7f0fe8a69"} Nov 25 09:08:49 crc kubenswrapper[4687]: I1125 09:08:49.405132 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" Nov 25 09:08:49 crc kubenswrapper[4687]: I1125 09:08:49.410404 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" Nov 25 09:08:49 crc kubenswrapper[4687]: I1125 09:08:49.422350 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" podStartSLOduration=5.422332501 podStartE2EDuration="5.422332501s" podCreationTimestamp="2025-11-25 09:08:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:08:49.419452261 +0000 UTC m=+324.473091979" watchObservedRunningTime="2025-11-25 09:08:49.422332501 +0000 UTC m=+324.475972209" Nov 25 09:09:00 crc kubenswrapper[4687]: I1125 09:09:00.858260 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-7fc9p"] Nov 25 09:09:00 crc kubenswrapper[4687]: I1125 09:09:00.860080 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:00 crc kubenswrapper[4687]: I1125 09:09:00.874649 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-7fc9p"] Nov 25 09:09:00 crc kubenswrapper[4687]: I1125 09:09:00.898183 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/81999114-ac1d-4ffc-a424-16f439f7c2b1-trusted-ca\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:00 crc kubenswrapper[4687]: I1125 09:09:00.898281 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/81999114-ac1d-4ffc-a424-16f439f7c2b1-registry-certificates\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:00 crc kubenswrapper[4687]: I1125 09:09:00.898358 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/81999114-ac1d-4ffc-a424-16f439f7c2b1-installation-pull-secrets\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:00 crc kubenswrapper[4687]: I1125 09:09:00.898423 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/81999114-ac1d-4ffc-a424-16f439f7c2b1-bound-sa-token\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:00 crc kubenswrapper[4687]: I1125 09:09:00.898468 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/81999114-ac1d-4ffc-a424-16f439f7c2b1-registry-tls\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:00 crc kubenswrapper[4687]: I1125 09:09:00.898545 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rq6qg\" (UniqueName: \"kubernetes.io/projected/81999114-ac1d-4ffc-a424-16f439f7c2b1-kube-api-access-rq6qg\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:00 crc kubenswrapper[4687]: I1125 09:09:00.898589 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/81999114-ac1d-4ffc-a424-16f439f7c2b1-ca-trust-extracted\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:00 crc kubenswrapper[4687]: I1125 09:09:00.898639 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:00 crc kubenswrapper[4687]: I1125 09:09:00.925586 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:01 crc kubenswrapper[4687]: I1125 09:09:01.000054 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/81999114-ac1d-4ffc-a424-16f439f7c2b1-registry-tls\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:01 crc kubenswrapper[4687]: I1125 09:09:01.000119 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rq6qg\" (UniqueName: \"kubernetes.io/projected/81999114-ac1d-4ffc-a424-16f439f7c2b1-kube-api-access-rq6qg\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:01 crc kubenswrapper[4687]: I1125 09:09:01.000142 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/81999114-ac1d-4ffc-a424-16f439f7c2b1-ca-trust-extracted\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:01 crc kubenswrapper[4687]: I1125 09:09:01.000165 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/81999114-ac1d-4ffc-a424-16f439f7c2b1-trusted-ca\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:01 crc kubenswrapper[4687]: I1125 09:09:01.000196 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/81999114-ac1d-4ffc-a424-16f439f7c2b1-registry-certificates\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:01 crc kubenswrapper[4687]: I1125 09:09:01.000229 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/81999114-ac1d-4ffc-a424-16f439f7c2b1-installation-pull-secrets\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:01 crc kubenswrapper[4687]: I1125 09:09:01.000247 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/81999114-ac1d-4ffc-a424-16f439f7c2b1-bound-sa-token\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:01 crc kubenswrapper[4687]: I1125 09:09:01.001486 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/81999114-ac1d-4ffc-a424-16f439f7c2b1-ca-trust-extracted\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:01 crc kubenswrapper[4687]: I1125 09:09:01.002333 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/81999114-ac1d-4ffc-a424-16f439f7c2b1-registry-certificates\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:01 crc kubenswrapper[4687]: I1125 09:09:01.002371 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/81999114-ac1d-4ffc-a424-16f439f7c2b1-trusted-ca\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:01 crc kubenswrapper[4687]: I1125 09:09:01.006782 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/81999114-ac1d-4ffc-a424-16f439f7c2b1-installation-pull-secrets\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:01 crc kubenswrapper[4687]: I1125 09:09:01.007987 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/81999114-ac1d-4ffc-a424-16f439f7c2b1-registry-tls\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:01 crc kubenswrapper[4687]: I1125 09:09:01.017104 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/81999114-ac1d-4ffc-a424-16f439f7c2b1-bound-sa-token\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:01 crc kubenswrapper[4687]: I1125 09:09:01.018796 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rq6qg\" (UniqueName: \"kubernetes.io/projected/81999114-ac1d-4ffc-a424-16f439f7c2b1-kube-api-access-rq6qg\") pod \"image-registry-66df7c8f76-7fc9p\" (UID: \"81999114-ac1d-4ffc-a424-16f439f7c2b1\") " pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:01 crc kubenswrapper[4687]: I1125 09:09:01.180398 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:01 crc kubenswrapper[4687]: I1125 09:09:01.589423 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-7fc9p"] Nov 25 09:09:01 crc kubenswrapper[4687]: W1125 09:09:01.599562 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod81999114_ac1d_4ffc_a424_16f439f7c2b1.slice/crio-e14c0a8b654eeaef3066446b33a8a2c337c549a94b03400c7800238fed57b77d WatchSource:0}: Error finding container e14c0a8b654eeaef3066446b33a8a2c337c549a94b03400c7800238fed57b77d: Status 404 returned error can't find the container with id e14c0a8b654eeaef3066446b33a8a2c337c549a94b03400c7800238fed57b77d Nov 25 09:09:01 crc kubenswrapper[4687]: I1125 09:09:01.867894 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-b974966b8-wkmcm"] Nov 25 09:09:01 crc kubenswrapper[4687]: I1125 09:09:01.868398 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" podUID="3bc5a2c1-0177-46ff-b1ef-1090394a4afd" containerName="controller-manager" containerID="cri-o://857e355fea6d385696835ca38ed9494b48e84dbdc8eab309c89d4750e8ee974e" gracePeriod=30 Nov 25 09:09:02 crc kubenswrapper[4687]: I1125 09:09:02.476142 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" event={"ID":"81999114-ac1d-4ffc-a424-16f439f7c2b1","Type":"ContainerStarted","Data":"2064cecbba15616c3d5f5c263589ac6bffdd2ec05faa5fb1311883c06a0c792c"} Nov 25 09:09:02 crc kubenswrapper[4687]: I1125 09:09:02.476192 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" event={"ID":"81999114-ac1d-4ffc-a424-16f439f7c2b1","Type":"ContainerStarted","Data":"e14c0a8b654eeaef3066446b33a8a2c337c549a94b03400c7800238fed57b77d"} Nov 25 09:09:02 crc kubenswrapper[4687]: I1125 09:09:02.476309 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:02 crc kubenswrapper[4687]: I1125 09:09:02.478601 4687 generic.go:334] "Generic (PLEG): container finished" podID="3bc5a2c1-0177-46ff-b1ef-1090394a4afd" containerID="857e355fea6d385696835ca38ed9494b48e84dbdc8eab309c89d4750e8ee974e" exitCode=0 Nov 25 09:09:02 crc kubenswrapper[4687]: I1125 09:09:02.478645 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" event={"ID":"3bc5a2c1-0177-46ff-b1ef-1090394a4afd","Type":"ContainerDied","Data":"857e355fea6d385696835ca38ed9494b48e84dbdc8eab309c89d4750e8ee974e"} Nov 25 09:09:02 crc kubenswrapper[4687]: I1125 09:09:02.505533 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" podStartSLOduration=2.505483029 podStartE2EDuration="2.505483029s" podCreationTimestamp="2025-11-25 09:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:09:02.499955597 +0000 UTC m=+337.553595325" watchObservedRunningTime="2025-11-25 09:09:02.505483029 +0000 UTC m=+337.559122767" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.055065 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.081111 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-85b64c87d-rw8lf"] Nov 25 09:09:03 crc kubenswrapper[4687]: E1125 09:09:03.081393 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bc5a2c1-0177-46ff-b1ef-1090394a4afd" containerName="controller-manager" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.081414 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bc5a2c1-0177-46ff-b1ef-1090394a4afd" containerName="controller-manager" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.081529 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="3bc5a2c1-0177-46ff-b1ef-1090394a4afd" containerName="controller-manager" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.081995 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.088205 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-85b64c87d-rw8lf"] Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.134685 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-serving-cert\") pod \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\" (UID: \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\") " Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.134775 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-client-ca\") pod \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\" (UID: \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\") " Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.134820 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-proxy-ca-bundles\") pod \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\" (UID: \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\") " Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.134845 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2g4pf\" (UniqueName: \"kubernetes.io/projected/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-kube-api-access-2g4pf\") pod \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\" (UID: \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\") " Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.134902 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-config\") pod \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\" (UID: \"3bc5a2c1-0177-46ff-b1ef-1090394a4afd\") " Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.135132 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rb4js\" (UniqueName: \"kubernetes.io/projected/61995a37-b5a6-4b70-882c-f495f9f26450-kube-api-access-rb4js\") pod \"controller-manager-85b64c87d-rw8lf\" (UID: \"61995a37-b5a6-4b70-882c-f495f9f26450\") " pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 
09:09:03.135171 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/61995a37-b5a6-4b70-882c-f495f9f26450-proxy-ca-bundles\") pod \"controller-manager-85b64c87d-rw8lf\" (UID: \"61995a37-b5a6-4b70-882c-f495f9f26450\") " pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.135213 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61995a37-b5a6-4b70-882c-f495f9f26450-config\") pod \"controller-manager-85b64c87d-rw8lf\" (UID: \"61995a37-b5a6-4b70-882c-f495f9f26450\") " pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.135237 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/61995a37-b5a6-4b70-882c-f495f9f26450-serving-cert\") pod \"controller-manager-85b64c87d-rw8lf\" (UID: \"61995a37-b5a6-4b70-882c-f495f9f26450\") " pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.135264 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/61995a37-b5a6-4b70-882c-f495f9f26450-client-ca\") pod \"controller-manager-85b64c87d-rw8lf\" (UID: \"61995a37-b5a6-4b70-882c-f495f9f26450\") " pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.135742 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "3bc5a2c1-0177-46ff-b1ef-1090394a4afd" (UID: "3bc5a2c1-0177-46ff-b1ef-1090394a4afd"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.135772 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-config" (OuterVolumeSpecName: "config") pod "3bc5a2c1-0177-46ff-b1ef-1090394a4afd" (UID: "3bc5a2c1-0177-46ff-b1ef-1090394a4afd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.135759 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-client-ca" (OuterVolumeSpecName: "client-ca") pod "3bc5a2c1-0177-46ff-b1ef-1090394a4afd" (UID: "3bc5a2c1-0177-46ff-b1ef-1090394a4afd"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.140681 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-kube-api-access-2g4pf" (OuterVolumeSpecName: "kube-api-access-2g4pf") pod "3bc5a2c1-0177-46ff-b1ef-1090394a4afd" (UID: "3bc5a2c1-0177-46ff-b1ef-1090394a4afd"). InnerVolumeSpecName "kube-api-access-2g4pf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.145897 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "3bc5a2c1-0177-46ff-b1ef-1090394a4afd" (UID: "3bc5a2c1-0177-46ff-b1ef-1090394a4afd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.239732 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rb4js\" (UniqueName: \"kubernetes.io/projected/61995a37-b5a6-4b70-882c-f495f9f26450-kube-api-access-rb4js\") pod \"controller-manager-85b64c87d-rw8lf\" (UID: \"61995a37-b5a6-4b70-882c-f495f9f26450\") " pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.239800 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/61995a37-b5a6-4b70-882c-f495f9f26450-proxy-ca-bundles\") pod \"controller-manager-85b64c87d-rw8lf\" (UID: \"61995a37-b5a6-4b70-882c-f495f9f26450\") " pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.239847 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61995a37-b5a6-4b70-882c-f495f9f26450-config\") pod \"controller-manager-85b64c87d-rw8lf\" (UID: \"61995a37-b5a6-4b70-882c-f495f9f26450\") " pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.239870 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/61995a37-b5a6-4b70-882c-f495f9f26450-serving-cert\") pod \"controller-manager-85b64c87d-rw8lf\" (UID: \"61995a37-b5a6-4b70-882c-f495f9f26450\") " pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.239894 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/61995a37-b5a6-4b70-882c-f495f9f26450-client-ca\") pod \"controller-manager-85b64c87d-rw8lf\" (UID: \"61995a37-b5a6-4b70-882c-f495f9f26450\") " pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.239957 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.239972 4687 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.239984 4687 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.239996 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2g4pf\" (UniqueName: 
\"kubernetes.io/projected/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-kube-api-access-2g4pf\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.240007 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3bc5a2c1-0177-46ff-b1ef-1090394a4afd-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.241151 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/61995a37-b5a6-4b70-882c-f495f9f26450-client-ca\") pod \"controller-manager-85b64c87d-rw8lf\" (UID: \"61995a37-b5a6-4b70-882c-f495f9f26450\") " pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.241419 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/61995a37-b5a6-4b70-882c-f495f9f26450-proxy-ca-bundles\") pod \"controller-manager-85b64c87d-rw8lf\" (UID: \"61995a37-b5a6-4b70-882c-f495f9f26450\") " pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.241618 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61995a37-b5a6-4b70-882c-f495f9f26450-config\") pod \"controller-manager-85b64c87d-rw8lf\" (UID: \"61995a37-b5a6-4b70-882c-f495f9f26450\") " pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.243945 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/61995a37-b5a6-4b70-882c-f495f9f26450-serving-cert\") pod \"controller-manager-85b64c87d-rw8lf\" (UID: \"61995a37-b5a6-4b70-882c-f495f9f26450\") " pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.259406 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rb4js\" (UniqueName: \"kubernetes.io/projected/61995a37-b5a6-4b70-882c-f495f9f26450-kube-api-access-rb4js\") pod \"controller-manager-85b64c87d-rw8lf\" (UID: \"61995a37-b5a6-4b70-882c-f495f9f26450\") " pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.405990 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.488643 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.490668 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-b974966b8-wkmcm" event={"ID":"3bc5a2c1-0177-46ff-b1ef-1090394a4afd","Type":"ContainerDied","Data":"70a22845e81c575a2bcd515d7673c762b8c816e9b243c6363a032c67e6f570bd"} Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.490769 4687 scope.go:117] "RemoveContainer" containerID="857e355fea6d385696835ca38ed9494b48e84dbdc8eab309c89d4750e8ee974e" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.519371 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-b974966b8-wkmcm"] Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.533990 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-b974966b8-wkmcm"] Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.746445 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3bc5a2c1-0177-46ff-b1ef-1090394a4afd" path="/var/lib/kubelet/pods/3bc5a2c1-0177-46ff-b1ef-1090394a4afd/volumes" Nov 25 09:09:03 crc kubenswrapper[4687]: I1125 09:09:03.847389 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-85b64c87d-rw8lf"] Nov 25 09:09:03 crc kubenswrapper[4687]: W1125 09:09:03.857807 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod61995a37_b5a6_4b70_882c_f495f9f26450.slice/crio-09ebc8f22887bcf296f350c115f00d421dd8425c7f8455789538fee879d56dec WatchSource:0}: Error finding container 09ebc8f22887bcf296f350c115f00d421dd8425c7f8455789538fee879d56dec: Status 404 returned error can't find the container with id 09ebc8f22887bcf296f350c115f00d421dd8425c7f8455789538fee879d56dec Nov 25 09:09:04 crc kubenswrapper[4687]: I1125 09:09:04.495428 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" event={"ID":"61995a37-b5a6-4b70-882c-f495f9f26450","Type":"ContainerStarted","Data":"d7e5fe45bdb7345b27b398becb501a63021b5eca39bced1e9e432ecb58af27a5"} Nov 25 09:09:04 crc kubenswrapper[4687]: I1125 09:09:04.496877 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" event={"ID":"61995a37-b5a6-4b70-882c-f495f9f26450","Type":"ContainerStarted","Data":"09ebc8f22887bcf296f350c115f00d421dd8425c7f8455789538fee879d56dec"} Nov 25 09:09:04 crc kubenswrapper[4687]: I1125 09:09:04.497031 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" Nov 25 09:09:04 crc kubenswrapper[4687]: I1125 09:09:04.508259 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" Nov 25 09:09:04 crc kubenswrapper[4687]: I1125 09:09:04.515406 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-85b64c87d-rw8lf" podStartSLOduration=3.515391206 podStartE2EDuration="3.515391206s" podCreationTimestamp="2025-11-25 09:09:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:09:04.511888605 +0000 UTC 
m=+339.565528343" watchObservedRunningTime="2025-11-25 09:09:04.515391206 +0000 UTC m=+339.569030924" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.216077 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gpptd"] Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.216873 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-gpptd" podUID="ccde1639-05be-47c5-93b8-c1eb83167814" containerName="registry-server" containerID="cri-o://3917a7ee114715ca9218a4df6f577faaacf4f88b333309eb5cfa742470c2310d" gracePeriod=30 Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.228333 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-59vsx"] Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.228621 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-59vsx" podUID="d8cf4b36-d7aa-4d9a-bc65-a893435ca244" containerName="registry-server" containerID="cri-o://919d925f253c665e7c4b4ee72e57c4daa6d9ab7a87277bbfc937648664dbc4b9" gracePeriod=30 Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.243171 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-gm5pk"] Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.243722 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk" podUID="d31aac44-f947-4eae-811c-9c0822a157d0" containerName="marketplace-operator" containerID="cri-o://3a2293f04b12d5c3b1c82bb4e57ad8d5a6ae17331129ef3244d477b16aa7e7e9" gracePeriod=30 Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.258112 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nx4ps"] Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.258413 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-nx4ps" podUID="3afcc926-b324-4fe9-933c-4918a88619d9" containerName="registry-server" containerID="cri-o://90bdd72d9c318d4c97f055cf8bedf1a86acb4aa9e62b720a7c6702a8944b730d" gracePeriod=30 Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.262865 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-sdgfq"] Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.280472 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fqnbt"] Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.280881 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fqnbt" podUID="c9e2b2d2-bd11-4098-b71a-d2787d834e9d" containerName="registry-server" containerID="cri-o://94cc138c70abb9f2a2a49f202efafe4e7e86d67c776a1a891a33272db324268f" gracePeriod=30 Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.281086 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-sdgfq" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.296203 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-sdgfq"] Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.346901 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dsp4\" (UniqueName: \"kubernetes.io/projected/a26f1e8c-3181-4dbf-b2b6-13772b1d66d6-kube-api-access-2dsp4\") pod \"marketplace-operator-79b997595-sdgfq\" (UID: \"a26f1e8c-3181-4dbf-b2b6-13772b1d66d6\") " pod="openshift-marketplace/marketplace-operator-79b997595-sdgfq" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.346983 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a26f1e8c-3181-4dbf-b2b6-13772b1d66d6-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-sdgfq\" (UID: \"a26f1e8c-3181-4dbf-b2b6-13772b1d66d6\") " pod="openshift-marketplace/marketplace-operator-79b997595-sdgfq" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.347118 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/a26f1e8c-3181-4dbf-b2b6-13772b1d66d6-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-sdgfq\" (UID: \"a26f1e8c-3181-4dbf-b2b6-13772b1d66d6\") " pod="openshift-marketplace/marketplace-operator-79b997595-sdgfq" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.448126 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a26f1e8c-3181-4dbf-b2b6-13772b1d66d6-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-sdgfq\" (UID: \"a26f1e8c-3181-4dbf-b2b6-13772b1d66d6\") " pod="openshift-marketplace/marketplace-operator-79b997595-sdgfq" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.448408 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/a26f1e8c-3181-4dbf-b2b6-13772b1d66d6-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-sdgfq\" (UID: \"a26f1e8c-3181-4dbf-b2b6-13772b1d66d6\") " pod="openshift-marketplace/marketplace-operator-79b997595-sdgfq" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.448473 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dsp4\" (UniqueName: \"kubernetes.io/projected/a26f1e8c-3181-4dbf-b2b6-13772b1d66d6-kube-api-access-2dsp4\") pod \"marketplace-operator-79b997595-sdgfq\" (UID: \"a26f1e8c-3181-4dbf-b2b6-13772b1d66d6\") " pod="openshift-marketplace/marketplace-operator-79b997595-sdgfq" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.449604 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a26f1e8c-3181-4dbf-b2b6-13772b1d66d6-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-sdgfq\" (UID: \"a26f1e8c-3181-4dbf-b2b6-13772b1d66d6\") " pod="openshift-marketplace/marketplace-operator-79b997595-sdgfq" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.454109 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/a26f1e8c-3181-4dbf-b2b6-13772b1d66d6-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-sdgfq\" (UID: \"a26f1e8c-3181-4dbf-b2b6-13772b1d66d6\") " pod="openshift-marketplace/marketplace-operator-79b997595-sdgfq" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.467712 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dsp4\" (UniqueName: \"kubernetes.io/projected/a26f1e8c-3181-4dbf-b2b6-13772b1d66d6-kube-api-access-2dsp4\") pod \"marketplace-operator-79b997595-sdgfq\" (UID: \"a26f1e8c-3181-4dbf-b2b6-13772b1d66d6\") " pod="openshift-marketplace/marketplace-operator-79b997595-sdgfq" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.540575 4687 generic.go:334] "Generic (PLEG): container finished" podID="c9e2b2d2-bd11-4098-b71a-d2787d834e9d" containerID="94cc138c70abb9f2a2a49f202efafe4e7e86d67c776a1a891a33272db324268f" exitCode=0 Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.540666 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fqnbt" event={"ID":"c9e2b2d2-bd11-4098-b71a-d2787d834e9d","Type":"ContainerDied","Data":"94cc138c70abb9f2a2a49f202efafe4e7e86d67c776a1a891a33272db324268f"} Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.544133 4687 generic.go:334] "Generic (PLEG): container finished" podID="ccde1639-05be-47c5-93b8-c1eb83167814" containerID="3917a7ee114715ca9218a4df6f577faaacf4f88b333309eb5cfa742470c2310d" exitCode=0 Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.544189 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gpptd" event={"ID":"ccde1639-05be-47c5-93b8-c1eb83167814","Type":"ContainerDied","Data":"3917a7ee114715ca9218a4df6f577faaacf4f88b333309eb5cfa742470c2310d"} Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.546274 4687 generic.go:334] "Generic (PLEG): container finished" podID="d31aac44-f947-4eae-811c-9c0822a157d0" containerID="3a2293f04b12d5c3b1c82bb4e57ad8d5a6ae17331129ef3244d477b16aa7e7e9" exitCode=0 Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.546319 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk" event={"ID":"d31aac44-f947-4eae-811c-9c0822a157d0","Type":"ContainerDied","Data":"3a2293f04b12d5c3b1c82bb4e57ad8d5a6ae17331129ef3244d477b16aa7e7e9"} Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.554661 4687 generic.go:334] "Generic (PLEG): container finished" podID="d8cf4b36-d7aa-4d9a-bc65-a893435ca244" containerID="919d925f253c665e7c4b4ee72e57c4daa6d9ab7a87277bbfc937648664dbc4b9" exitCode=0 Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.554720 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59vsx" event={"ID":"d8cf4b36-d7aa-4d9a-bc65-a893435ca244","Type":"ContainerDied","Data":"919d925f253c665e7c4b4ee72e57c4daa6d9ab7a87277bbfc937648664dbc4b9"} Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.563640 4687 generic.go:334] "Generic (PLEG): container finished" podID="3afcc926-b324-4fe9-933c-4918a88619d9" containerID="90bdd72d9c318d4c97f055cf8bedf1a86acb4aa9e62b720a7c6702a8944b730d" exitCode=0 Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.563671 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nx4ps" 
event={"ID":"3afcc926-b324-4fe9-933c-4918a88619d9","Type":"ContainerDied","Data":"90bdd72d9c318d4c97f055cf8bedf1a86acb4aa9e62b720a7c6702a8944b730d"} Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.678780 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-sdgfq" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.684378 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-59vsx" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.752990 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8cf4b36-d7aa-4d9a-bc65-a893435ca244-utilities\") pod \"d8cf4b36-d7aa-4d9a-bc65-a893435ca244\" (UID: \"d8cf4b36-d7aa-4d9a-bc65-a893435ca244\") " Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.753022 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8cf4b36-d7aa-4d9a-bc65-a893435ca244-catalog-content\") pod \"d8cf4b36-d7aa-4d9a-bc65-a893435ca244\" (UID: \"d8cf4b36-d7aa-4d9a-bc65-a893435ca244\") " Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.753186 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sl985\" (UniqueName: \"kubernetes.io/projected/d8cf4b36-d7aa-4d9a-bc65-a893435ca244-kube-api-access-sl985\") pod \"d8cf4b36-d7aa-4d9a-bc65-a893435ca244\" (UID: \"d8cf4b36-d7aa-4d9a-bc65-a893435ca244\") " Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.754712 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8cf4b36-d7aa-4d9a-bc65-a893435ca244-utilities" (OuterVolumeSpecName: "utilities") pod "d8cf4b36-d7aa-4d9a-bc65-a893435ca244" (UID: "d8cf4b36-d7aa-4d9a-bc65-a893435ca244"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.769235 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8cf4b36-d7aa-4d9a-bc65-a893435ca244-kube-api-access-sl985" (OuterVolumeSpecName: "kube-api-access-sl985") pod "d8cf4b36-d7aa-4d9a-bc65-a893435ca244" (UID: "d8cf4b36-d7aa-4d9a-bc65-a893435ca244"). InnerVolumeSpecName "kube-api-access-sl985". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.828731 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8cf4b36-d7aa-4d9a-bc65-a893435ca244-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d8cf4b36-d7aa-4d9a-bc65-a893435ca244" (UID: "d8cf4b36-d7aa-4d9a-bc65-a893435ca244"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.854915 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sl985\" (UniqueName: \"kubernetes.io/projected/d8cf4b36-d7aa-4d9a-bc65-a893435ca244-kube-api-access-sl985\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.854940 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8cf4b36-d7aa-4d9a-bc65-a893435ca244-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.854949 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8cf4b36-d7aa-4d9a-bc65-a893435ca244-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.866482 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nx4ps" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.888997 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fqnbt" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.955522 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-twk6f\" (UniqueName: \"kubernetes.io/projected/c9e2b2d2-bd11-4098-b71a-d2787d834e9d-kube-api-access-twk6f\") pod \"c9e2b2d2-bd11-4098-b71a-d2787d834e9d\" (UID: \"c9e2b2d2-bd11-4098-b71a-d2787d834e9d\") " Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.955589 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3afcc926-b324-4fe9-933c-4918a88619d9-catalog-content\") pod \"3afcc926-b324-4fe9-933c-4918a88619d9\" (UID: \"3afcc926-b324-4fe9-933c-4918a88619d9\") " Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.955646 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9e2b2d2-bd11-4098-b71a-d2787d834e9d-utilities\") pod \"c9e2b2d2-bd11-4098-b71a-d2787d834e9d\" (UID: \"c9e2b2d2-bd11-4098-b71a-d2787d834e9d\") " Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.955702 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9e2b2d2-bd11-4098-b71a-d2787d834e9d-catalog-content\") pod \"c9e2b2d2-bd11-4098-b71a-d2787d834e9d\" (UID: \"c9e2b2d2-bd11-4098-b71a-d2787d834e9d\") " Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.955747 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vjttc\" (UniqueName: \"kubernetes.io/projected/3afcc926-b324-4fe9-933c-4918a88619d9-kube-api-access-vjttc\") pod \"3afcc926-b324-4fe9-933c-4918a88619d9\" (UID: \"3afcc926-b324-4fe9-933c-4918a88619d9\") " Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.955795 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3afcc926-b324-4fe9-933c-4918a88619d9-utilities\") pod \"3afcc926-b324-4fe9-933c-4918a88619d9\" (UID: \"3afcc926-b324-4fe9-933c-4918a88619d9\") " Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.956765 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/3afcc926-b324-4fe9-933c-4918a88619d9-utilities" (OuterVolumeSpecName: "utilities") pod "3afcc926-b324-4fe9-933c-4918a88619d9" (UID: "3afcc926-b324-4fe9-933c-4918a88619d9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.956775 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9e2b2d2-bd11-4098-b71a-d2787d834e9d-utilities" (OuterVolumeSpecName: "utilities") pod "c9e2b2d2-bd11-4098-b71a-d2787d834e9d" (UID: "c9e2b2d2-bd11-4098-b71a-d2787d834e9d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.960140 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9e2b2d2-bd11-4098-b71a-d2787d834e9d-kube-api-access-twk6f" (OuterVolumeSpecName: "kube-api-access-twk6f") pod "c9e2b2d2-bd11-4098-b71a-d2787d834e9d" (UID: "c9e2b2d2-bd11-4098-b71a-d2787d834e9d"). InnerVolumeSpecName "kube-api-access-twk6f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.960712 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3afcc926-b324-4fe9-933c-4918a88619d9-kube-api-access-vjttc" (OuterVolumeSpecName: "kube-api-access-vjttc") pod "3afcc926-b324-4fe9-933c-4918a88619d9" (UID: "3afcc926-b324-4fe9-933c-4918a88619d9"). InnerVolumeSpecName "kube-api-access-vjttc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:10 crc kubenswrapper[4687]: I1125 09:09:10.976202 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3afcc926-b324-4fe9-933c-4918a88619d9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3afcc926-b324-4fe9-933c-4918a88619d9" (UID: "3afcc926-b324-4fe9-933c-4918a88619d9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.049894 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9e2b2d2-bd11-4098-b71a-d2787d834e9d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c9e2b2d2-bd11-4098-b71a-d2787d834e9d" (UID: "c9e2b2d2-bd11-4098-b71a-d2787d834e9d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.057980 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9e2b2d2-bd11-4098-b71a-d2787d834e9d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.058032 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vjttc\" (UniqueName: \"kubernetes.io/projected/3afcc926-b324-4fe9-933c-4918a88619d9-kube-api-access-vjttc\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.058046 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3afcc926-b324-4fe9-933c-4918a88619d9-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.058055 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-twk6f\" (UniqueName: \"kubernetes.io/projected/c9e2b2d2-bd11-4098-b71a-d2787d834e9d-kube-api-access-twk6f\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.058065 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3afcc926-b324-4fe9-933c-4918a88619d9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.058075 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9e2b2d2-bd11-4098-b71a-d2787d834e9d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.177199 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-sdgfq"] Nov 25 09:09:11 crc kubenswrapper[4687]: W1125 09:09:11.190111 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda26f1e8c_3181_4dbf_b2b6_13772b1d66d6.slice/crio-26524841bdc4611dfb6f17522d37243c40e587889f1173b0502506744532f1a1 WatchSource:0}: Error finding container 26524841bdc4611dfb6f17522d37243c40e587889f1173b0502506744532f1a1: Status 404 returned error can't find the container with id 26524841bdc4611dfb6f17522d37243c40e587889f1173b0502506744532f1a1 Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.570431 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-sdgfq" event={"ID":"a26f1e8c-3181-4dbf-b2b6-13772b1d66d6","Type":"ContainerStarted","Data":"5850c1006785c7ddcee5e7c03420e85214ae81042f0c4e165cb8e032e56781f2"} Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.570981 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-sdgfq" event={"ID":"a26f1e8c-3181-4dbf-b2b6-13772b1d66d6","Type":"ContainerStarted","Data":"26524841bdc4611dfb6f17522d37243c40e587889f1173b0502506744532f1a1"} Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.571011 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-sdgfq" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.572010 4687 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-sdgfq container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.64:8080/healthz\": 
dial tcp 10.217.0.64:8080: connect: connection refused" start-of-body= Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.572065 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-sdgfq" podUID="a26f1e8c-3181-4dbf-b2b6-13772b1d66d6" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.64:8080/healthz\": dial tcp 10.217.0.64:8080: connect: connection refused" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.573001 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59vsx" event={"ID":"d8cf4b36-d7aa-4d9a-bc65-a893435ca244","Type":"ContainerDied","Data":"ae8ee9a877e2f11d25fe74131e3fd0f30d39fc2dbbb9abf1c4236afa85b3d7cd"} Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.573082 4687 scope.go:117] "RemoveContainer" containerID="919d925f253c665e7c4b4ee72e57c4daa6d9ab7a87277bbfc937648664dbc4b9" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.573318 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-59vsx" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.575739 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk" event={"ID":"d31aac44-f947-4eae-811c-9c0822a157d0","Type":"ContainerDied","Data":"f7014fdfd1337028d98c2d8dc8ec25c66c18ffc0299542f2999c6b5025bd4389"} Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.575770 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f7014fdfd1337028d98c2d8dc8ec25c66c18ffc0299542f2999c6b5025bd4389" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.579965 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nx4ps" event={"ID":"3afcc926-b324-4fe9-933c-4918a88619d9","Type":"ContainerDied","Data":"a65cfaec9ca454e62cb265a7584b02d544deee67759f448c2a6b38ba29ccef89"} Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.580000 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nx4ps" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.580298 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gpptd" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.582624 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fqnbt" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.582830 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fqnbt" event={"ID":"c9e2b2d2-bd11-4098-b71a-d2787d834e9d","Type":"ContainerDied","Data":"b24a2e82cf06fce8e503bdddde61833e8cb660d0138f5b78e491812327ae4863"} Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.585075 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gpptd" event={"ID":"ccde1639-05be-47c5-93b8-c1eb83167814","Type":"ContainerDied","Data":"c9314e9adddc4b5ef379d8445e61e07600dec023437595d42608733c8ea2cbbe"} Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.585147 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gpptd" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.585919 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.601330 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-sdgfq" podStartSLOduration=1.601309155 podStartE2EDuration="1.601309155s" podCreationTimestamp="2025-11-25 09:09:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:09:11.593999242 +0000 UTC m=+346.647638960" watchObservedRunningTime="2025-11-25 09:09:11.601309155 +0000 UTC m=+346.654948873" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.603640 4687 scope.go:117] "RemoveContainer" containerID="2e180633bf01df9fc83530f0fb8c3f68f1976bb1fff9c6e919aa64dcfd3a7fb3" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.629413 4687 scope.go:117] "RemoveContainer" containerID="857eb552f61c3a9e3b15c4de08ef1a5fbd819b6ce644f30c70142d13fcd3151c" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.645260 4687 scope.go:117] "RemoveContainer" containerID="90bdd72d9c318d4c97f055cf8bedf1a86acb4aa9e62b720a7c6702a8944b730d" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.665376 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d31aac44-f947-4eae-811c-9c0822a157d0-marketplace-trusted-ca\") pod \"d31aac44-f947-4eae-811c-9c0822a157d0\" (UID: \"d31aac44-f947-4eae-811c-9c0822a157d0\") " Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.665436 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xs2bl\" (UniqueName: \"kubernetes.io/projected/d31aac44-f947-4eae-811c-9c0822a157d0-kube-api-access-xs2bl\") pod \"d31aac44-f947-4eae-811c-9c0822a157d0\" (UID: \"d31aac44-f947-4eae-811c-9c0822a157d0\") " Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.665464 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-888bv\" (UniqueName: \"kubernetes.io/projected/ccde1639-05be-47c5-93b8-c1eb83167814-kube-api-access-888bv\") pod \"ccde1639-05be-47c5-93b8-c1eb83167814\" (UID: \"ccde1639-05be-47c5-93b8-c1eb83167814\") " Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.665545 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ccde1639-05be-47c5-93b8-c1eb83167814-catalog-content\") pod \"ccde1639-05be-47c5-93b8-c1eb83167814\" (UID: \"ccde1639-05be-47c5-93b8-c1eb83167814\") " Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.665584 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d31aac44-f947-4eae-811c-9c0822a157d0-marketplace-operator-metrics\") pod \"d31aac44-f947-4eae-811c-9c0822a157d0\" (UID: \"d31aac44-f947-4eae-811c-9c0822a157d0\") " Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.665697 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ccde1639-05be-47c5-93b8-c1eb83167814-utilities\") pod \"ccde1639-05be-47c5-93b8-c1eb83167814\" (UID: 
\"ccde1639-05be-47c5-93b8-c1eb83167814\") " Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.666101 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d31aac44-f947-4eae-811c-9c0822a157d0-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "d31aac44-f947-4eae-811c-9c0822a157d0" (UID: "d31aac44-f947-4eae-811c-9c0822a157d0"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.666792 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ccde1639-05be-47c5-93b8-c1eb83167814-utilities" (OuterVolumeSpecName: "utilities") pod "ccde1639-05be-47c5-93b8-c1eb83167814" (UID: "ccde1639-05be-47c5-93b8-c1eb83167814"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.671853 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d31aac44-f947-4eae-811c-9c0822a157d0-kube-api-access-xs2bl" (OuterVolumeSpecName: "kube-api-access-xs2bl") pod "d31aac44-f947-4eae-811c-9c0822a157d0" (UID: "d31aac44-f947-4eae-811c-9c0822a157d0"). InnerVolumeSpecName "kube-api-access-xs2bl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.672072 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d31aac44-f947-4eae-811c-9c0822a157d0-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "d31aac44-f947-4eae-811c-9c0822a157d0" (UID: "d31aac44-f947-4eae-811c-9c0822a157d0"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.673938 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ccde1639-05be-47c5-93b8-c1eb83167814-kube-api-access-888bv" (OuterVolumeSpecName: "kube-api-access-888bv") pod "ccde1639-05be-47c5-93b8-c1eb83167814" (UID: "ccde1639-05be-47c5-93b8-c1eb83167814"). InnerVolumeSpecName "kube-api-access-888bv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.674902 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nx4ps"] Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.677129 4687 scope.go:117] "RemoveContainer" containerID="90a6b85e47fd5b21e2cdb165baa3675493ca81a4c765a2e20955e5a46f7a4b91" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.682323 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-nx4ps"] Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.691683 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-59vsx"] Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.694823 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-59vsx"] Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.696953 4687 scope.go:117] "RemoveContainer" containerID="bfa4956f9204c4927294a3ce4e3080b7167d989da5072e1407bd60a38318266f" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.703321 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fqnbt"] Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.706890 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fqnbt"] Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.721721 4687 scope.go:117] "RemoveContainer" containerID="94cc138c70abb9f2a2a49f202efafe4e7e86d67c776a1a891a33272db324268f" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.722408 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ccde1639-05be-47c5-93b8-c1eb83167814-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ccde1639-05be-47c5-93b8-c1eb83167814" (UID: "ccde1639-05be-47c5-93b8-c1eb83167814"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.739598 4687 scope.go:117] "RemoveContainer" containerID="ef97aa67437478c5404595ab9730d508baab5541993b6b9af8792c7dbbb24e67" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.744009 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3afcc926-b324-4fe9-933c-4918a88619d9" path="/var/lib/kubelet/pods/3afcc926-b324-4fe9-933c-4918a88619d9/volumes" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.746607 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9e2b2d2-bd11-4098-b71a-d2787d834e9d" path="/var/lib/kubelet/pods/c9e2b2d2-bd11-4098-b71a-d2787d834e9d/volumes" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.747260 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8cf4b36-d7aa-4d9a-bc65-a893435ca244" path="/var/lib/kubelet/pods/d8cf4b36-d7aa-4d9a-bc65-a893435ca244/volumes" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.767387 4687 scope.go:117] "RemoveContainer" containerID="87d51eb94586abd7777db1b1cbbe29942a6fb69a9180642f0a2a5e9e18695f2d" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.768744 4687 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d31aac44-f947-4eae-811c-9c0822a157d0-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.768765 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xs2bl\" (UniqueName: \"kubernetes.io/projected/d31aac44-f947-4eae-811c-9c0822a157d0-kube-api-access-xs2bl\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.768774 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-888bv\" (UniqueName: \"kubernetes.io/projected/ccde1639-05be-47c5-93b8-c1eb83167814-kube-api-access-888bv\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.768786 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ccde1639-05be-47c5-93b8-c1eb83167814-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.768795 4687 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d31aac44-f947-4eae-811c-9c0822a157d0-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.768804 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ccde1639-05be-47c5-93b8-c1eb83167814-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.790357 4687 scope.go:117] "RemoveContainer" containerID="3917a7ee114715ca9218a4df6f577faaacf4f88b333309eb5cfa742470c2310d" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.804293 4687 scope.go:117] "RemoveContainer" containerID="43759463b90b228b018e6cb77363d996b893669384d2ddf8f4c267081812a120" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.816516 4687 scope.go:117] "RemoveContainer" containerID="fcd4d6f8223e9a7875ef9260966f5ea643985175eaf394ede8a2e1633b5ccc70" Nov 25 09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.904110 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gpptd"] Nov 25 
09:09:11 crc kubenswrapper[4687]: I1125 09:09:11.907217 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-gpptd"] Nov 25 09:09:12 crc kubenswrapper[4687]: I1125 09:09:12.596218 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-gm5pk" Nov 25 09:09:12 crc kubenswrapper[4687]: I1125 09:09:12.600476 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-sdgfq" Nov 25 09:09:12 crc kubenswrapper[4687]: I1125 09:09:12.615258 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-gm5pk"] Nov 25 09:09:12 crc kubenswrapper[4687]: I1125 09:09:12.619039 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-gm5pk"] Nov 25 09:09:13 crc kubenswrapper[4687]: I1125 09:09:13.740397 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ccde1639-05be-47c5-93b8-c1eb83167814" path="/var/lib/kubelet/pods/ccde1639-05be-47c5-93b8-c1eb83167814/volumes" Nov 25 09:09:13 crc kubenswrapper[4687]: I1125 09:09:13.741084 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d31aac44-f947-4eae-811c-9c0822a157d0" path="/var/lib/kubelet/pods/d31aac44-f947-4eae-811c-9c0822a157d0/volumes" Nov 25 09:09:21 crc kubenswrapper[4687]: I1125 09:09:21.187371 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-7fc9p" Nov 25 09:09:21 crc kubenswrapper[4687]: I1125 09:09:21.235928 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2d9pq"] Nov 25 09:09:23 crc kubenswrapper[4687]: I1125 09:09:23.844921 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:09:23 crc kubenswrapper[4687]: I1125 09:09:23.845217 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:09:41 crc kubenswrapper[4687]: I1125 09:09:41.883332 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r"] Nov 25 09:09:41 crc kubenswrapper[4687]: I1125 09:09:41.884968 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" podUID="7819bf5a-7449-45b1-8abe-9c25f33f0b0a" containerName="route-controller-manager" containerID="cri-o://49066a9f194b5b418521aa354b45342bdf723ea051c5fba4598c95a2aa609313" gracePeriod=30 Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.299944 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.447492 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qgl5s"] Nov 25 09:09:42 crc kubenswrapper[4687]: E1125 09:09:42.448324 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccde1639-05be-47c5-93b8-c1eb83167814" containerName="registry-server" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.448356 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccde1639-05be-47c5-93b8-c1eb83167814" containerName="registry-server" Nov 25 09:09:42 crc kubenswrapper[4687]: E1125 09:09:42.448377 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8cf4b36-d7aa-4d9a-bc65-a893435ca244" containerName="registry-server" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.448392 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8cf4b36-d7aa-4d9a-bc65-a893435ca244" containerName="registry-server" Nov 25 09:09:42 crc kubenswrapper[4687]: E1125 09:09:42.448417 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8cf4b36-d7aa-4d9a-bc65-a893435ca244" containerName="extract-content" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.448432 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8cf4b36-d7aa-4d9a-bc65-a893435ca244" containerName="extract-content" Nov 25 09:09:42 crc kubenswrapper[4687]: E1125 09:09:42.448455 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9e2b2d2-bd11-4098-b71a-d2787d834e9d" containerName="registry-server" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.448470 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9e2b2d2-bd11-4098-b71a-d2787d834e9d" containerName="registry-server" Nov 25 09:09:42 crc kubenswrapper[4687]: E1125 09:09:42.448496 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9e2b2d2-bd11-4098-b71a-d2787d834e9d" containerName="extract-content" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.448559 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9e2b2d2-bd11-4098-b71a-d2787d834e9d" containerName="extract-content" Nov 25 09:09:42 crc kubenswrapper[4687]: E1125 09:09:42.448590 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3afcc926-b324-4fe9-933c-4918a88619d9" containerName="extract-utilities" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.448611 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="3afcc926-b324-4fe9-933c-4918a88619d9" containerName="extract-utilities" Nov 25 09:09:42 crc kubenswrapper[4687]: E1125 09:09:42.448632 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3afcc926-b324-4fe9-933c-4918a88619d9" containerName="extract-content" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.448648 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="3afcc926-b324-4fe9-933c-4918a88619d9" containerName="extract-content" Nov 25 09:09:42 crc kubenswrapper[4687]: E1125 09:09:42.448672 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7819bf5a-7449-45b1-8abe-9c25f33f0b0a" containerName="route-controller-manager" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.448690 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="7819bf5a-7449-45b1-8abe-9c25f33f0b0a" containerName="route-controller-manager" Nov 25 09:09:42 crc kubenswrapper[4687]: E1125 09:09:42.448710 4687 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccde1639-05be-47c5-93b8-c1eb83167814" containerName="extract-utilities" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.448726 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccde1639-05be-47c5-93b8-c1eb83167814" containerName="extract-utilities" Nov 25 09:09:42 crc kubenswrapper[4687]: E1125 09:09:42.448745 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d31aac44-f947-4eae-811c-9c0822a157d0" containerName="marketplace-operator" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.448760 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d31aac44-f947-4eae-811c-9c0822a157d0" containerName="marketplace-operator" Nov 25 09:09:42 crc kubenswrapper[4687]: E1125 09:09:42.448778 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9e2b2d2-bd11-4098-b71a-d2787d834e9d" containerName="extract-utilities" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.448793 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9e2b2d2-bd11-4098-b71a-d2787d834e9d" containerName="extract-utilities" Nov 25 09:09:42 crc kubenswrapper[4687]: E1125 09:09:42.448818 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccde1639-05be-47c5-93b8-c1eb83167814" containerName="extract-content" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.448835 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccde1639-05be-47c5-93b8-c1eb83167814" containerName="extract-content" Nov 25 09:09:42 crc kubenswrapper[4687]: E1125 09:09:42.448861 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3afcc926-b324-4fe9-933c-4918a88619d9" containerName="registry-server" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.448877 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="3afcc926-b324-4fe9-933c-4918a88619d9" containerName="registry-server" Nov 25 09:09:42 crc kubenswrapper[4687]: E1125 09:09:42.448899 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8cf4b36-d7aa-4d9a-bc65-a893435ca244" containerName="extract-utilities" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.448915 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8cf4b36-d7aa-4d9a-bc65-a893435ca244" containerName="extract-utilities" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.449145 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccde1639-05be-47c5-93b8-c1eb83167814" containerName="registry-server" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.449170 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="d31aac44-f947-4eae-811c-9c0822a157d0" containerName="marketplace-operator" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.449196 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8cf4b36-d7aa-4d9a-bc65-a893435ca244" containerName="registry-server" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.449230 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="3afcc926-b324-4fe9-933c-4918a88619d9" containerName="registry-server" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.449250 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9e2b2d2-bd11-4098-b71a-d2787d834e9d" containerName="registry-server" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.449270 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="7819bf5a-7449-45b1-8abe-9c25f33f0b0a" 
containerName="route-controller-manager" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.450944 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qgl5s" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.456250 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.459082 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qgl5s"] Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.469386 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-serving-cert\") pod \"7819bf5a-7449-45b1-8abe-9c25f33f0b0a\" (UID: \"7819bf5a-7449-45b1-8abe-9c25f33f0b0a\") " Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.469495 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-config\") pod \"7819bf5a-7449-45b1-8abe-9c25f33f0b0a\" (UID: \"7819bf5a-7449-45b1-8abe-9c25f33f0b0a\") " Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.469616 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-client-ca\") pod \"7819bf5a-7449-45b1-8abe-9c25f33f0b0a\" (UID: \"7819bf5a-7449-45b1-8abe-9c25f33f0b0a\") " Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.469671 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5gsgv\" (UniqueName: \"kubernetes.io/projected/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-kube-api-access-5gsgv\") pod \"7819bf5a-7449-45b1-8abe-9c25f33f0b0a\" (UID: \"7819bf5a-7449-45b1-8abe-9c25f33f0b0a\") " Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.470562 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-config" (OuterVolumeSpecName: "config") pod "7819bf5a-7449-45b1-8abe-9c25f33f0b0a" (UID: "7819bf5a-7449-45b1-8abe-9c25f33f0b0a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.470943 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c463bf2b-be16-4478-b58b-50c681a58749-utilities\") pod \"certified-operators-qgl5s\" (UID: \"c463bf2b-be16-4478-b58b-50c681a58749\") " pod="openshift-marketplace/certified-operators-qgl5s" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.470958 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-client-ca" (OuterVolumeSpecName: "client-ca") pod "7819bf5a-7449-45b1-8abe-9c25f33f0b0a" (UID: "7819bf5a-7449-45b1-8abe-9c25f33f0b0a"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.471114 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c463bf2b-be16-4478-b58b-50c681a58749-catalog-content\") pod \"certified-operators-qgl5s\" (UID: \"c463bf2b-be16-4478-b58b-50c681a58749\") " pod="openshift-marketplace/certified-operators-qgl5s" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.471190 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxlhm\" (UniqueName: \"kubernetes.io/projected/c463bf2b-be16-4478-b58b-50c681a58749-kube-api-access-nxlhm\") pod \"certified-operators-qgl5s\" (UID: \"c463bf2b-be16-4478-b58b-50c681a58749\") " pod="openshift-marketplace/certified-operators-qgl5s" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.471278 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.471298 4687 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.475726 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-kube-api-access-5gsgv" (OuterVolumeSpecName: "kube-api-access-5gsgv") pod "7819bf5a-7449-45b1-8abe-9c25f33f0b0a" (UID: "7819bf5a-7449-45b1-8abe-9c25f33f0b0a"). InnerVolumeSpecName "kube-api-access-5gsgv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.476027 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7819bf5a-7449-45b1-8abe-9c25f33f0b0a" (UID: "7819bf5a-7449-45b1-8abe-9c25f33f0b0a"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.572217 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c463bf2b-be16-4478-b58b-50c681a58749-utilities\") pod \"certified-operators-qgl5s\" (UID: \"c463bf2b-be16-4478-b58b-50c681a58749\") " pod="openshift-marketplace/certified-operators-qgl5s" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.572479 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c463bf2b-be16-4478-b58b-50c681a58749-catalog-content\") pod \"certified-operators-qgl5s\" (UID: \"c463bf2b-be16-4478-b58b-50c681a58749\") " pod="openshift-marketplace/certified-operators-qgl5s" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.572532 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxlhm\" (UniqueName: \"kubernetes.io/projected/c463bf2b-be16-4478-b58b-50c681a58749-kube-api-access-nxlhm\") pod \"certified-operators-qgl5s\" (UID: \"c463bf2b-be16-4478-b58b-50c681a58749\") " pod="openshift-marketplace/certified-operators-qgl5s" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.572588 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5gsgv\" (UniqueName: \"kubernetes.io/projected/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-kube-api-access-5gsgv\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.572600 4687 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7819bf5a-7449-45b1-8abe-9c25f33f0b0a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.573027 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c463bf2b-be16-4478-b58b-50c681a58749-utilities\") pod \"certified-operators-qgl5s\" (UID: \"c463bf2b-be16-4478-b58b-50c681a58749\") " pod="openshift-marketplace/certified-operators-qgl5s" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.573842 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c463bf2b-be16-4478-b58b-50c681a58749-catalog-content\") pod \"certified-operators-qgl5s\" (UID: \"c463bf2b-be16-4478-b58b-50c681a58749\") " pod="openshift-marketplace/certified-operators-qgl5s" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.588853 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxlhm\" (UniqueName: \"kubernetes.io/projected/c463bf2b-be16-4478-b58b-50c681a58749-kube-api-access-nxlhm\") pod \"certified-operators-qgl5s\" (UID: \"c463bf2b-be16-4478-b58b-50c681a58749\") " pod="openshift-marketplace/certified-operators-qgl5s" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.643169 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-t6b8w"] Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.646392 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-t6b8w" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.649621 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.656395 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t6b8w"] Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.674097 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4f80d13-afbf-4fd7-8af8-726aef33138a-catalog-content\") pod \"community-operators-t6b8w\" (UID: \"c4f80d13-afbf-4fd7-8af8-726aef33138a\") " pod="openshift-marketplace/community-operators-t6b8w" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.674170 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4f80d13-afbf-4fd7-8af8-726aef33138a-utilities\") pod \"community-operators-t6b8w\" (UID: \"c4f80d13-afbf-4fd7-8af8-726aef33138a\") " pod="openshift-marketplace/community-operators-t6b8w" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.674193 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9bsj\" (UniqueName: \"kubernetes.io/projected/c4f80d13-afbf-4fd7-8af8-726aef33138a-kube-api-access-c9bsj\") pod \"community-operators-t6b8w\" (UID: \"c4f80d13-afbf-4fd7-8af8-726aef33138a\") " pod="openshift-marketplace/community-operators-t6b8w" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.759277 4687 generic.go:334] "Generic (PLEG): container finished" podID="7819bf5a-7449-45b1-8abe-9c25f33f0b0a" containerID="49066a9f194b5b418521aa354b45342bdf723ea051c5fba4598c95a2aa609313" exitCode=0 Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.759325 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.759321 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" event={"ID":"7819bf5a-7449-45b1-8abe-9c25f33f0b0a","Type":"ContainerDied","Data":"49066a9f194b5b418521aa354b45342bdf723ea051c5fba4598c95a2aa609313"} Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.759372 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r" event={"ID":"7819bf5a-7449-45b1-8abe-9c25f33f0b0a","Type":"ContainerDied","Data":"2be96d7fdf2e56945b8c6e6b05bace35cc6a907b088e95bbae8865e7f0fe8a69"} Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.759388 4687 scope.go:117] "RemoveContainer" containerID="49066a9f194b5b418521aa354b45342bdf723ea051c5fba4598c95a2aa609313" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.776869 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qgl5s" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.777986 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4f80d13-afbf-4fd7-8af8-726aef33138a-utilities\") pod \"community-operators-t6b8w\" (UID: \"c4f80d13-afbf-4fd7-8af8-726aef33138a\") " pod="openshift-marketplace/community-operators-t6b8w" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.778292 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9bsj\" (UniqueName: \"kubernetes.io/projected/c4f80d13-afbf-4fd7-8af8-726aef33138a-kube-api-access-c9bsj\") pod \"community-operators-t6b8w\" (UID: \"c4f80d13-afbf-4fd7-8af8-726aef33138a\") " pod="openshift-marketplace/community-operators-t6b8w" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.778604 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4f80d13-afbf-4fd7-8af8-726aef33138a-catalog-content\") pod \"community-operators-t6b8w\" (UID: \"c4f80d13-afbf-4fd7-8af8-726aef33138a\") " pod="openshift-marketplace/community-operators-t6b8w" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.778813 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4f80d13-afbf-4fd7-8af8-726aef33138a-utilities\") pod \"community-operators-t6b8w\" (UID: \"c4f80d13-afbf-4fd7-8af8-726aef33138a\") " pod="openshift-marketplace/community-operators-t6b8w" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.779417 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4f80d13-afbf-4fd7-8af8-726aef33138a-catalog-content\") pod \"community-operators-t6b8w\" (UID: \"c4f80d13-afbf-4fd7-8af8-726aef33138a\") " pod="openshift-marketplace/community-operators-t6b8w" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.800474 4687 scope.go:117] "RemoveContainer" containerID="49066a9f194b5b418521aa354b45342bdf723ea051c5fba4598c95a2aa609313" Nov 25 09:09:42 crc kubenswrapper[4687]: E1125 09:09:42.800929 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49066a9f194b5b418521aa354b45342bdf723ea051c5fba4598c95a2aa609313\": container with ID starting with 49066a9f194b5b418521aa354b45342bdf723ea051c5fba4598c95a2aa609313 not found: ID does not exist" containerID="49066a9f194b5b418521aa354b45342bdf723ea051c5fba4598c95a2aa609313" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.800988 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49066a9f194b5b418521aa354b45342bdf723ea051c5fba4598c95a2aa609313"} err="failed to get container status \"49066a9f194b5b418521aa354b45342bdf723ea051c5fba4598c95a2aa609313\": rpc error: code = NotFound desc = could not find container \"49066a9f194b5b418521aa354b45342bdf723ea051c5fba4598c95a2aa609313\": container with ID starting with 49066a9f194b5b418521aa354b45342bdf723ea051c5fba4598c95a2aa609313 not found: ID does not exist" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.803887 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r"] Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.809180 4687 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-585b55f498-fqb4r"] Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.814160 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9bsj\" (UniqueName: \"kubernetes.io/projected/c4f80d13-afbf-4fd7-8af8-726aef33138a-kube-api-access-c9bsj\") pod \"community-operators-t6b8w\" (UID: \"c4f80d13-afbf-4fd7-8af8-726aef33138a\") " pod="openshift-marketplace/community-operators-t6b8w" Nov 25 09:09:42 crc kubenswrapper[4687]: I1125 09:09:42.969299 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t6b8w" Nov 25 09:09:43 crc kubenswrapper[4687]: I1125 09:09:43.166400 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qgl5s"] Nov 25 09:09:43 crc kubenswrapper[4687]: I1125 09:09:43.325783 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t6b8w"] Nov 25 09:09:43 crc kubenswrapper[4687]: W1125 09:09:43.331119 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc4f80d13_afbf_4fd7_8af8_726aef33138a.slice/crio-cc8616888cb70f057f2547f2756506a140610df9c7742948465a444acd07033b WatchSource:0}: Error finding container cc8616888cb70f057f2547f2756506a140610df9c7742948465a444acd07033b: Status 404 returned error can't find the container with id cc8616888cb70f057f2547f2756506a140610df9c7742948465a444acd07033b Nov 25 09:09:43 crc kubenswrapper[4687]: I1125 09:09:43.745616 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7819bf5a-7449-45b1-8abe-9c25f33f0b0a" path="/var/lib/kubelet/pods/7819bf5a-7449-45b1-8abe-9c25f33f0b0a/volumes" Nov 25 09:09:43 crc kubenswrapper[4687]: I1125 09:09:43.769075 4687 generic.go:334] "Generic (PLEG): container finished" podID="c4f80d13-afbf-4fd7-8af8-726aef33138a" containerID="b7755c4a38304644c59e249de683758b8e10a9fac613d79f1d6b40707b55ddcd" exitCode=0 Nov 25 09:09:43 crc kubenswrapper[4687]: I1125 09:09:43.769215 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t6b8w" event={"ID":"c4f80d13-afbf-4fd7-8af8-726aef33138a","Type":"ContainerDied","Data":"b7755c4a38304644c59e249de683758b8e10a9fac613d79f1d6b40707b55ddcd"} Nov 25 09:09:43 crc kubenswrapper[4687]: I1125 09:09:43.769268 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t6b8w" event={"ID":"c4f80d13-afbf-4fd7-8af8-726aef33138a","Type":"ContainerStarted","Data":"cc8616888cb70f057f2547f2756506a140610df9c7742948465a444acd07033b"} Nov 25 09:09:43 crc kubenswrapper[4687]: I1125 09:09:43.771330 4687 generic.go:334] "Generic (PLEG): container finished" podID="c463bf2b-be16-4478-b58b-50c681a58749" containerID="07492db666d8d43b62d3523372a55c3108537d3d630a2f69a3a13a17ae20ab78" exitCode=0 Nov 25 09:09:43 crc kubenswrapper[4687]: I1125 09:09:43.771375 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qgl5s" event={"ID":"c463bf2b-be16-4478-b58b-50c681a58749","Type":"ContainerDied","Data":"07492db666d8d43b62d3523372a55c3108537d3d630a2f69a3a13a17ae20ab78"} Nov 25 09:09:43 crc kubenswrapper[4687]: I1125 09:09:43.771412 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qgl5s" 
event={"ID":"c463bf2b-be16-4478-b58b-50c681a58749","Type":"ContainerStarted","Data":"32924321b1a2be748e152fa469c9506056f23d826b57af9817b7582ea1e4c719"} Nov 25 09:09:43 crc kubenswrapper[4687]: I1125 09:09:43.905386 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj"] Nov 25 09:09:43 crc kubenswrapper[4687]: I1125 09:09:43.906196 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj" Nov 25 09:09:43 crc kubenswrapper[4687]: I1125 09:09:43.911090 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 09:09:43 crc kubenswrapper[4687]: I1125 09:09:43.912422 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 09:09:43 crc kubenswrapper[4687]: I1125 09:09:43.912536 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 09:09:43 crc kubenswrapper[4687]: I1125 09:09:43.912834 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 09:09:43 crc kubenswrapper[4687]: I1125 09:09:43.913078 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 09:09:43 crc kubenswrapper[4687]: I1125 09:09:43.916088 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj"] Nov 25 09:09:43 crc kubenswrapper[4687]: I1125 09:09:43.916342 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 09:09:44 crc kubenswrapper[4687]: I1125 09:09:44.097637 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7flh\" (UniqueName: \"kubernetes.io/projected/5e821d06-d067-4f09-8c39-a772d04200ec-kube-api-access-k7flh\") pod \"route-controller-manager-55c4c7c674-8t7tj\" (UID: \"5e821d06-d067-4f09-8c39-a772d04200ec\") " pod="openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj" Nov 25 09:09:44 crc kubenswrapper[4687]: I1125 09:09:44.097959 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5e821d06-d067-4f09-8c39-a772d04200ec-client-ca\") pod \"route-controller-manager-55c4c7c674-8t7tj\" (UID: \"5e821d06-d067-4f09-8c39-a772d04200ec\") " pod="openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj" Nov 25 09:09:44 crc kubenswrapper[4687]: I1125 09:09:44.098008 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5e821d06-d067-4f09-8c39-a772d04200ec-serving-cert\") pod \"route-controller-manager-55c4c7c674-8t7tj\" (UID: \"5e821d06-d067-4f09-8c39-a772d04200ec\") " pod="openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj" Nov 25 09:09:44 crc kubenswrapper[4687]: I1125 09:09:44.098059 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e821d06-d067-4f09-8c39-a772d04200ec-config\") pod 
\"route-controller-manager-55c4c7c674-8t7tj\" (UID: \"5e821d06-d067-4f09-8c39-a772d04200ec\") " pod="openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj" Nov 25 09:09:44 crc kubenswrapper[4687]: I1125 09:09:44.199121 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e821d06-d067-4f09-8c39-a772d04200ec-config\") pod \"route-controller-manager-55c4c7c674-8t7tj\" (UID: \"5e821d06-d067-4f09-8c39-a772d04200ec\") " pod="openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj" Nov 25 09:09:44 crc kubenswrapper[4687]: I1125 09:09:44.199182 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7flh\" (UniqueName: \"kubernetes.io/projected/5e821d06-d067-4f09-8c39-a772d04200ec-kube-api-access-k7flh\") pod \"route-controller-manager-55c4c7c674-8t7tj\" (UID: \"5e821d06-d067-4f09-8c39-a772d04200ec\") " pod="openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj" Nov 25 09:09:44 crc kubenswrapper[4687]: I1125 09:09:44.199207 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5e821d06-d067-4f09-8c39-a772d04200ec-client-ca\") pod \"route-controller-manager-55c4c7c674-8t7tj\" (UID: \"5e821d06-d067-4f09-8c39-a772d04200ec\") " pod="openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj" Nov 25 09:09:44 crc kubenswrapper[4687]: I1125 09:09:44.199265 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5e821d06-d067-4f09-8c39-a772d04200ec-serving-cert\") pod \"route-controller-manager-55c4c7c674-8t7tj\" (UID: \"5e821d06-d067-4f09-8c39-a772d04200ec\") " pod="openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj" Nov 25 09:09:44 crc kubenswrapper[4687]: I1125 09:09:44.200602 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5e821d06-d067-4f09-8c39-a772d04200ec-client-ca\") pod \"route-controller-manager-55c4c7c674-8t7tj\" (UID: \"5e821d06-d067-4f09-8c39-a772d04200ec\") " pod="openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj" Nov 25 09:09:44 crc kubenswrapper[4687]: I1125 09:09:44.202206 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e821d06-d067-4f09-8c39-a772d04200ec-config\") pod \"route-controller-manager-55c4c7c674-8t7tj\" (UID: \"5e821d06-d067-4f09-8c39-a772d04200ec\") " pod="openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj" Nov 25 09:09:44 crc kubenswrapper[4687]: I1125 09:09:44.209453 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5e821d06-d067-4f09-8c39-a772d04200ec-serving-cert\") pod \"route-controller-manager-55c4c7c674-8t7tj\" (UID: \"5e821d06-d067-4f09-8c39-a772d04200ec\") " pod="openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj" Nov 25 09:09:44 crc kubenswrapper[4687]: I1125 09:09:44.216738 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7flh\" (UniqueName: \"kubernetes.io/projected/5e821d06-d067-4f09-8c39-a772d04200ec-kube-api-access-k7flh\") pod \"route-controller-manager-55c4c7c674-8t7tj\" (UID: \"5e821d06-d067-4f09-8c39-a772d04200ec\") " 
pod="openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj" Nov 25 09:09:44 crc kubenswrapper[4687]: I1125 09:09:44.231212 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj" Nov 25 09:09:44 crc kubenswrapper[4687]: I1125 09:09:44.685662 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj"] Nov 25 09:09:44 crc kubenswrapper[4687]: W1125 09:09:44.686622 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5e821d06_d067_4f09_8c39_a772d04200ec.slice/crio-7e307986afcd480546310dfa500d9628bc571668bdaa753d6cd55103faec4d1a WatchSource:0}: Error finding container 7e307986afcd480546310dfa500d9628bc571668bdaa753d6cd55103faec4d1a: Status 404 returned error can't find the container with id 7e307986afcd480546310dfa500d9628bc571668bdaa753d6cd55103faec4d1a Nov 25 09:09:44 crc kubenswrapper[4687]: I1125 09:09:44.777815 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj" event={"ID":"5e821d06-d067-4f09-8c39-a772d04200ec","Type":"ContainerStarted","Data":"7e307986afcd480546310dfa500d9628bc571668bdaa753d6cd55103faec4d1a"} Nov 25 09:09:44 crc kubenswrapper[4687]: I1125 09:09:44.779737 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t6b8w" event={"ID":"c4f80d13-afbf-4fd7-8af8-726aef33138a","Type":"ContainerStarted","Data":"5a4558a01d98a1545fd8a0e7177bf928f63afbd8d07d86c1cd55dbacdfd211d5"} Nov 25 09:09:44 crc kubenswrapper[4687]: I1125 09:09:44.784703 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qgl5s" event={"ID":"c463bf2b-be16-4478-b58b-50c681a58749","Type":"ContainerStarted","Data":"8428cf4a208faa821f58ef64511b033f121262dc19ffbc965b3e33e1a03dc6b5"} Nov 25 09:09:44 crc kubenswrapper[4687]: I1125 09:09:44.846647 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rf9fh"] Nov 25 09:09:44 crc kubenswrapper[4687]: I1125 09:09:44.847627 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rf9fh" Nov 25 09:09:44 crc kubenswrapper[4687]: I1125 09:09:44.849939 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 09:09:44 crc kubenswrapper[4687]: I1125 09:09:44.858366 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rf9fh"] Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.010167 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92741ed4-1138-4ac2-ad4b-0c558b1b574d-utilities\") pod \"redhat-marketplace-rf9fh\" (UID: \"92741ed4-1138-4ac2-ad4b-0c558b1b574d\") " pod="openshift-marketplace/redhat-marketplace-rf9fh" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.010244 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92741ed4-1138-4ac2-ad4b-0c558b1b574d-catalog-content\") pod \"redhat-marketplace-rf9fh\" (UID: \"92741ed4-1138-4ac2-ad4b-0c558b1b574d\") " pod="openshift-marketplace/redhat-marketplace-rf9fh" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.010283 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54g4v\" (UniqueName: \"kubernetes.io/projected/92741ed4-1138-4ac2-ad4b-0c558b1b574d-kube-api-access-54g4v\") pod \"redhat-marketplace-rf9fh\" (UID: \"92741ed4-1138-4ac2-ad4b-0c558b1b574d\") " pod="openshift-marketplace/redhat-marketplace-rf9fh" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.041250 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ml5cs"] Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.042224 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ml5cs" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.045819 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.051992 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ml5cs"] Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.111769 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92741ed4-1138-4ac2-ad4b-0c558b1b574d-catalog-content\") pod \"redhat-marketplace-rf9fh\" (UID: \"92741ed4-1138-4ac2-ad4b-0c558b1b574d\") " pod="openshift-marketplace/redhat-marketplace-rf9fh" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.111846 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54g4v\" (UniqueName: \"kubernetes.io/projected/92741ed4-1138-4ac2-ad4b-0c558b1b574d-kube-api-access-54g4v\") pod \"redhat-marketplace-rf9fh\" (UID: \"92741ed4-1138-4ac2-ad4b-0c558b1b574d\") " pod="openshift-marketplace/redhat-marketplace-rf9fh" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.111893 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92741ed4-1138-4ac2-ad4b-0c558b1b574d-utilities\") pod \"redhat-marketplace-rf9fh\" (UID: \"92741ed4-1138-4ac2-ad4b-0c558b1b574d\") " pod="openshift-marketplace/redhat-marketplace-rf9fh" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.112335 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92741ed4-1138-4ac2-ad4b-0c558b1b574d-catalog-content\") pod \"redhat-marketplace-rf9fh\" (UID: \"92741ed4-1138-4ac2-ad4b-0c558b1b574d\") " pod="openshift-marketplace/redhat-marketplace-rf9fh" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.112390 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92741ed4-1138-4ac2-ad4b-0c558b1b574d-utilities\") pod \"redhat-marketplace-rf9fh\" (UID: \"92741ed4-1138-4ac2-ad4b-0c558b1b574d\") " pod="openshift-marketplace/redhat-marketplace-rf9fh" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.135651 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54g4v\" (UniqueName: \"kubernetes.io/projected/92741ed4-1138-4ac2-ad4b-0c558b1b574d-kube-api-access-54g4v\") pod \"redhat-marketplace-rf9fh\" (UID: \"92741ed4-1138-4ac2-ad4b-0c558b1b574d\") " pod="openshift-marketplace/redhat-marketplace-rf9fh" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.163608 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rf9fh" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.215732 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d587d63c-f76c-47e0-8e59-ff79ca1d8390-utilities\") pod \"redhat-operators-ml5cs\" (UID: \"d587d63c-f76c-47e0-8e59-ff79ca1d8390\") " pod="openshift-marketplace/redhat-operators-ml5cs" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.215786 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d587d63c-f76c-47e0-8e59-ff79ca1d8390-catalog-content\") pod \"redhat-operators-ml5cs\" (UID: \"d587d63c-f76c-47e0-8e59-ff79ca1d8390\") " pod="openshift-marketplace/redhat-operators-ml5cs" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.215844 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-495gq\" (UniqueName: \"kubernetes.io/projected/d587d63c-f76c-47e0-8e59-ff79ca1d8390-kube-api-access-495gq\") pod \"redhat-operators-ml5cs\" (UID: \"d587d63c-f76c-47e0-8e59-ff79ca1d8390\") " pod="openshift-marketplace/redhat-operators-ml5cs" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.317599 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d587d63c-f76c-47e0-8e59-ff79ca1d8390-utilities\") pod \"redhat-operators-ml5cs\" (UID: \"d587d63c-f76c-47e0-8e59-ff79ca1d8390\") " pod="openshift-marketplace/redhat-operators-ml5cs" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.317956 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d587d63c-f76c-47e0-8e59-ff79ca1d8390-catalog-content\") pod \"redhat-operators-ml5cs\" (UID: \"d587d63c-f76c-47e0-8e59-ff79ca1d8390\") " pod="openshift-marketplace/redhat-operators-ml5cs" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.318019 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-495gq\" (UniqueName: \"kubernetes.io/projected/d587d63c-f76c-47e0-8e59-ff79ca1d8390-kube-api-access-495gq\") pod \"redhat-operators-ml5cs\" (UID: \"d587d63c-f76c-47e0-8e59-ff79ca1d8390\") " pod="openshift-marketplace/redhat-operators-ml5cs" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.318329 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d587d63c-f76c-47e0-8e59-ff79ca1d8390-catalog-content\") pod \"redhat-operators-ml5cs\" (UID: \"d587d63c-f76c-47e0-8e59-ff79ca1d8390\") " pod="openshift-marketplace/redhat-operators-ml5cs" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.318523 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d587d63c-f76c-47e0-8e59-ff79ca1d8390-utilities\") pod \"redhat-operators-ml5cs\" (UID: \"d587d63c-f76c-47e0-8e59-ff79ca1d8390\") " pod="openshift-marketplace/redhat-operators-ml5cs" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.337519 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-495gq\" (UniqueName: \"kubernetes.io/projected/d587d63c-f76c-47e0-8e59-ff79ca1d8390-kube-api-access-495gq\") pod \"redhat-operators-ml5cs\" (UID: 
\"d587d63c-f76c-47e0-8e59-ff79ca1d8390\") " pod="openshift-marketplace/redhat-operators-ml5cs" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.356037 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ml5cs" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.585250 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rf9fh"] Nov 25 09:09:45 crc kubenswrapper[4687]: W1125 09:09:45.590215 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod92741ed4_1138_4ac2_ad4b_0c558b1b574d.slice/crio-764a883302581177848c2d344a15c4a91452b9a151b1096a17c42d94030783fe WatchSource:0}: Error finding container 764a883302581177848c2d344a15c4a91452b9a151b1096a17c42d94030783fe: Status 404 returned error can't find the container with id 764a883302581177848c2d344a15c4a91452b9a151b1096a17c42d94030783fe Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.742812 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ml5cs"] Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.791752 4687 generic.go:334] "Generic (PLEG): container finished" podID="92741ed4-1138-4ac2-ad4b-0c558b1b574d" containerID="27699c83275206cbb74b9a1d9c8662a091d05a53ec54e4989a00f721f45d29da" exitCode=0 Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.791805 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rf9fh" event={"ID":"92741ed4-1138-4ac2-ad4b-0c558b1b574d","Type":"ContainerDied","Data":"27699c83275206cbb74b9a1d9c8662a091d05a53ec54e4989a00f721f45d29da"} Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.791886 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rf9fh" event={"ID":"92741ed4-1138-4ac2-ad4b-0c558b1b574d","Type":"ContainerStarted","Data":"764a883302581177848c2d344a15c4a91452b9a151b1096a17c42d94030783fe"} Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.794862 4687 generic.go:334] "Generic (PLEG): container finished" podID="c463bf2b-be16-4478-b58b-50c681a58749" containerID="8428cf4a208faa821f58ef64511b033f121262dc19ffbc965b3e33e1a03dc6b5" exitCode=0 Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.794918 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qgl5s" event={"ID":"c463bf2b-be16-4478-b58b-50c681a58749","Type":"ContainerDied","Data":"8428cf4a208faa821f58ef64511b033f121262dc19ffbc965b3e33e1a03dc6b5"} Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.798369 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj" event={"ID":"5e821d06-d067-4f09-8c39-a772d04200ec","Type":"ContainerStarted","Data":"619502e89e658ca2537333daab81fe6388fbf72d848f2e5cb562531293328177"} Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.798597 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.801229 4687 generic.go:334] "Generic (PLEG): container finished" podID="c4f80d13-afbf-4fd7-8af8-726aef33138a" containerID="5a4558a01d98a1545fd8a0e7177bf928f63afbd8d07d86c1cd55dbacdfd211d5" exitCode=0 Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.801254 4687 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t6b8w" event={"ID":"c4f80d13-afbf-4fd7-8af8-726aef33138a","Type":"ContainerDied","Data":"5a4558a01d98a1545fd8a0e7177bf928f63afbd8d07d86c1cd55dbacdfd211d5"} Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.806366 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj" Nov 25 09:09:45 crc kubenswrapper[4687]: I1125 09:09:45.838516 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-55c4c7c674-8t7tj" podStartSLOduration=4.838479789 podStartE2EDuration="4.838479789s" podCreationTimestamp="2025-11-25 09:09:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:09:45.83129478 +0000 UTC m=+380.884934498" watchObservedRunningTime="2025-11-25 09:09:45.838479789 +0000 UTC m=+380.892119507" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.274837 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" podUID="5faad20b-1dd5-40df-8b0a-02890b547838" containerName="registry" containerID="cri-o://fbd85dc543be1475150091a84ed73f0363592d9ffd9113139278e64b065e77e1" gracePeriod=30 Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.653839 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.808764 4687 generic.go:334] "Generic (PLEG): container finished" podID="5faad20b-1dd5-40df-8b0a-02890b547838" containerID="fbd85dc543be1475150091a84ed73f0363592d9ffd9113139278e64b065e77e1" exitCode=0 Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.808831 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" event={"ID":"5faad20b-1dd5-40df-8b0a-02890b547838","Type":"ContainerDied","Data":"fbd85dc543be1475150091a84ed73f0363592d9ffd9113139278e64b065e77e1"} Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.808860 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" event={"ID":"5faad20b-1dd5-40df-8b0a-02890b547838","Type":"ContainerDied","Data":"8b7abc5b9998d3193490851ed6dafee840c21add9c3523faf7b61a7fe89c096d"} Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.808878 4687 scope.go:117] "RemoveContainer" containerID="fbd85dc543be1475150091a84ed73f0363592d9ffd9113139278e64b065e77e1" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.808985 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-2d9pq" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.813406 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t6b8w" event={"ID":"c4f80d13-afbf-4fd7-8af8-726aef33138a","Type":"ContainerStarted","Data":"70078dc9a75c7abb4058a5d1fb10a1989128c6bb174c41c6a3f2e8e1d53dfd8a"} Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.818147 4687 generic.go:334] "Generic (PLEG): container finished" podID="d587d63c-f76c-47e0-8e59-ff79ca1d8390" containerID="e7094149cf4450021b3308046ee278e075a0a99df7a71e38e22fc924b2666f60" exitCode=0 Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.818203 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ml5cs" event={"ID":"d587d63c-f76c-47e0-8e59-ff79ca1d8390","Type":"ContainerDied","Data":"e7094149cf4450021b3308046ee278e075a0a99df7a71e38e22fc924b2666f60"} Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.818226 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ml5cs" event={"ID":"d587d63c-f76c-47e0-8e59-ff79ca1d8390","Type":"ContainerStarted","Data":"56fd2925c90ea07c29104b41c6672316a797164e52c246b29dc7a5a594dd3d31"} Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.824338 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rf9fh" event={"ID":"92741ed4-1138-4ac2-ad4b-0c558b1b574d","Type":"ContainerStarted","Data":"6a77f98d2db80c62a79df95bd0d6465bd9a5b9a097e70d9ff9b4fd7e28879ede"} Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.827480 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qgl5s" event={"ID":"c463bf2b-be16-4478-b58b-50c681a58749","Type":"ContainerStarted","Data":"39421224ebc4f27d3fd631012ce5aa78c2c56a5c3817009e9317c8e267f6f64d"} Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.830353 4687 scope.go:117] "RemoveContainer" containerID="fbd85dc543be1475150091a84ed73f0363592d9ffd9113139278e64b065e77e1" Nov 25 09:09:46 crc kubenswrapper[4687]: E1125 09:09:46.832818 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fbd85dc543be1475150091a84ed73f0363592d9ffd9113139278e64b065e77e1\": container with ID starting with fbd85dc543be1475150091a84ed73f0363592d9ffd9113139278e64b065e77e1 not found: ID does not exist" containerID="fbd85dc543be1475150091a84ed73f0363592d9ffd9113139278e64b065e77e1" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.832854 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fbd85dc543be1475150091a84ed73f0363592d9ffd9113139278e64b065e77e1"} err="failed to get container status \"fbd85dc543be1475150091a84ed73f0363592d9ffd9113139278e64b065e77e1\": rpc error: code = NotFound desc = could not find container \"fbd85dc543be1475150091a84ed73f0363592d9ffd9113139278e64b065e77e1\": container with ID starting with fbd85dc543be1475150091a84ed73f0363592d9ffd9113139278e64b065e77e1 not found: ID does not exist" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.842379 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"5faad20b-1dd5-40df-8b0a-02890b547838\" (UID: 
\"5faad20b-1dd5-40df-8b0a-02890b547838\") " Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.842447 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5faad20b-1dd5-40df-8b0a-02890b547838-registry-certificates\") pod \"5faad20b-1dd5-40df-8b0a-02890b547838\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.842470 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5faad20b-1dd5-40df-8b0a-02890b547838-installation-pull-secrets\") pod \"5faad20b-1dd5-40df-8b0a-02890b547838\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.842493 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ct98d\" (UniqueName: \"kubernetes.io/projected/5faad20b-1dd5-40df-8b0a-02890b547838-kube-api-access-ct98d\") pod \"5faad20b-1dd5-40df-8b0a-02890b547838\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.842534 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5faad20b-1dd5-40df-8b0a-02890b547838-registry-tls\") pod \"5faad20b-1dd5-40df-8b0a-02890b547838\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.842552 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5faad20b-1dd5-40df-8b0a-02890b547838-trusted-ca\") pod \"5faad20b-1dd5-40df-8b0a-02890b547838\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.843434 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5faad20b-1dd5-40df-8b0a-02890b547838-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "5faad20b-1dd5-40df-8b0a-02890b547838" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.842716 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5faad20b-1dd5-40df-8b0a-02890b547838-bound-sa-token\") pod \"5faad20b-1dd5-40df-8b0a-02890b547838\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.843536 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5faad20b-1dd5-40df-8b0a-02890b547838-ca-trust-extracted\") pod \"5faad20b-1dd5-40df-8b0a-02890b547838\" (UID: \"5faad20b-1dd5-40df-8b0a-02890b547838\") " Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.843580 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5faad20b-1dd5-40df-8b0a-02890b547838-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "5faad20b-1dd5-40df-8b0a-02890b547838" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.848512 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5faad20b-1dd5-40df-8b0a-02890b547838-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "5faad20b-1dd5-40df-8b0a-02890b547838" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.849888 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5faad20b-1dd5-40df-8b0a-02890b547838-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "5faad20b-1dd5-40df-8b0a-02890b547838" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.850032 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5faad20b-1dd5-40df-8b0a-02890b547838-kube-api-access-ct98d" (OuterVolumeSpecName: "kube-api-access-ct98d") pod "5faad20b-1dd5-40df-8b0a-02890b547838" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838"). InnerVolumeSpecName "kube-api-access-ct98d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.852477 4687 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5faad20b-1dd5-40df-8b0a-02890b547838-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.852530 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ct98d\" (UniqueName: \"kubernetes.io/projected/5faad20b-1dd5-40df-8b0a-02890b547838-kube-api-access-ct98d\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.852544 4687 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5faad20b-1dd5-40df-8b0a-02890b547838-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.852555 4687 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5faad20b-1dd5-40df-8b0a-02890b547838-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.852566 4687 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5faad20b-1dd5-40df-8b0a-02890b547838-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.853208 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5faad20b-1dd5-40df-8b0a-02890b547838-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "5faad20b-1dd5-40df-8b0a-02890b547838" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838"). InnerVolumeSpecName "installation-pull-secrets". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.854095 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "5faad20b-1dd5-40df-8b0a-02890b547838" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.865488 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5faad20b-1dd5-40df-8b0a-02890b547838-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "5faad20b-1dd5-40df-8b0a-02890b547838" (UID: "5faad20b-1dd5-40df-8b0a-02890b547838"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.865887 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-t6b8w" podStartSLOduration=2.349033755 podStartE2EDuration="4.865866703s" podCreationTimestamp="2025-11-25 09:09:42 +0000 UTC" firstStartedPulling="2025-11-25 09:09:43.771113224 +0000 UTC m=+378.824752982" lastFinishedPulling="2025-11-25 09:09:46.287946212 +0000 UTC m=+381.341585930" observedRunningTime="2025-11-25 09:09:46.840379118 +0000 UTC m=+381.894018846" watchObservedRunningTime="2025-11-25 09:09:46.865866703 +0000 UTC m=+381.919506421" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.917116 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qgl5s" podStartSLOduration=2.241807007 podStartE2EDuration="4.917099417s" podCreationTimestamp="2025-11-25 09:09:42 +0000 UTC" firstStartedPulling="2025-11-25 09:09:43.774819533 +0000 UTC m=+378.828459281" lastFinishedPulling="2025-11-25 09:09:46.450111973 +0000 UTC m=+381.503751691" observedRunningTime="2025-11-25 09:09:46.911574896 +0000 UTC m=+381.965214624" watchObservedRunningTime="2025-11-25 09:09:46.917099417 +0000 UTC m=+381.970739135" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.953960 4687 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5faad20b-1dd5-40df-8b0a-02890b547838-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:46 crc kubenswrapper[4687]: I1125 09:09:46.954001 4687 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5faad20b-1dd5-40df-8b0a-02890b547838-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:47 crc kubenswrapper[4687]: I1125 09:09:47.132658 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2d9pq"] Nov 25 09:09:47 crc kubenswrapper[4687]: I1125 09:09:47.137420 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2d9pq"] Nov 25 09:09:47 crc kubenswrapper[4687]: I1125 09:09:47.745889 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5faad20b-1dd5-40df-8b0a-02890b547838" path="/var/lib/kubelet/pods/5faad20b-1dd5-40df-8b0a-02890b547838/volumes" Nov 25 09:09:47 crc kubenswrapper[4687]: I1125 09:09:47.843244 4687 generic.go:334] "Generic (PLEG): container finished" 
podID="92741ed4-1138-4ac2-ad4b-0c558b1b574d" containerID="6a77f98d2db80c62a79df95bd0d6465bd9a5b9a097e70d9ff9b4fd7e28879ede" exitCode=0 Nov 25 09:09:47 crc kubenswrapper[4687]: I1125 09:09:47.843571 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rf9fh" event={"ID":"92741ed4-1138-4ac2-ad4b-0c558b1b574d","Type":"ContainerDied","Data":"6a77f98d2db80c62a79df95bd0d6465bd9a5b9a097e70d9ff9b4fd7e28879ede"} Nov 25 09:09:48 crc kubenswrapper[4687]: I1125 09:09:48.852287 4687 generic.go:334] "Generic (PLEG): container finished" podID="d587d63c-f76c-47e0-8e59-ff79ca1d8390" containerID="0160e01556acd305322ec8ded581d10dc799d9103307840374eb15b57af06515" exitCode=0 Nov 25 09:09:48 crc kubenswrapper[4687]: I1125 09:09:48.852384 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ml5cs" event={"ID":"d587d63c-f76c-47e0-8e59-ff79ca1d8390","Type":"ContainerDied","Data":"0160e01556acd305322ec8ded581d10dc799d9103307840374eb15b57af06515"} Nov 25 09:09:48 crc kubenswrapper[4687]: I1125 09:09:48.855154 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rf9fh" event={"ID":"92741ed4-1138-4ac2-ad4b-0c558b1b574d","Type":"ContainerStarted","Data":"a4c855f31b63892446a6b50800afc5cb32f58c683989562baf24dab695c88c0f"} Nov 25 09:09:48 crc kubenswrapper[4687]: I1125 09:09:48.886470 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rf9fh" podStartSLOduration=2.3717366970000002 podStartE2EDuration="4.886453032s" podCreationTimestamp="2025-11-25 09:09:44 +0000 UTC" firstStartedPulling="2025-11-25 09:09:45.79324921 +0000 UTC m=+380.846888928" lastFinishedPulling="2025-11-25 09:09:48.307965545 +0000 UTC m=+383.361605263" observedRunningTime="2025-11-25 09:09:48.882881508 +0000 UTC m=+383.936521226" watchObservedRunningTime="2025-11-25 09:09:48.886453032 +0000 UTC m=+383.940092750" Nov 25 09:09:49 crc kubenswrapper[4687]: I1125 09:09:49.863056 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ml5cs" event={"ID":"d587d63c-f76c-47e0-8e59-ff79ca1d8390","Type":"ContainerStarted","Data":"7e95e4f72a7af1ac6708c9edcc11eba3c8f878e938415de4af55c7c1b1827cdd"} Nov 25 09:09:49 crc kubenswrapper[4687]: I1125 09:09:49.886294 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ml5cs" podStartSLOduration=2.244336905 podStartE2EDuration="4.886273942s" podCreationTimestamp="2025-11-25 09:09:45 +0000 UTC" firstStartedPulling="2025-11-25 09:09:46.819241642 +0000 UTC m=+381.872881350" lastFinishedPulling="2025-11-25 09:09:49.461178669 +0000 UTC m=+384.514818387" observedRunningTime="2025-11-25 09:09:49.882347137 +0000 UTC m=+384.935986855" watchObservedRunningTime="2025-11-25 09:09:49.886273942 +0000 UTC m=+384.939913660" Nov 25 09:09:52 crc kubenswrapper[4687]: I1125 09:09:52.777601 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qgl5s" Nov 25 09:09:52 crc kubenswrapper[4687]: I1125 09:09:52.778149 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qgl5s" Nov 25 09:09:52 crc kubenswrapper[4687]: I1125 09:09:52.816770 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qgl5s" Nov 25 09:09:52 crc kubenswrapper[4687]: I1125 
09:09:52.923044 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qgl5s" Nov 25 09:09:52 crc kubenswrapper[4687]: I1125 09:09:52.970278 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-t6b8w" Nov 25 09:09:52 crc kubenswrapper[4687]: I1125 09:09:52.970316 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-t6b8w" Nov 25 09:09:53 crc kubenswrapper[4687]: I1125 09:09:53.010165 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-t6b8w" Nov 25 09:09:53 crc kubenswrapper[4687]: I1125 09:09:53.844689 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:09:53 crc kubenswrapper[4687]: I1125 09:09:53.844742 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:09:53 crc kubenswrapper[4687]: I1125 09:09:53.918889 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-t6b8w" Nov 25 09:09:55 crc kubenswrapper[4687]: I1125 09:09:55.163749 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rf9fh" Nov 25 09:09:55 crc kubenswrapper[4687]: I1125 09:09:55.163825 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rf9fh" Nov 25 09:09:55 crc kubenswrapper[4687]: I1125 09:09:55.200068 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rf9fh" Nov 25 09:09:55 crc kubenswrapper[4687]: I1125 09:09:55.357559 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ml5cs" Nov 25 09:09:55 crc kubenswrapper[4687]: I1125 09:09:55.357608 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ml5cs" Nov 25 09:09:55 crc kubenswrapper[4687]: I1125 09:09:55.394683 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ml5cs" Nov 25 09:09:55 crc kubenswrapper[4687]: I1125 09:09:55.936157 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rf9fh" Nov 25 09:09:55 crc kubenswrapper[4687]: I1125 09:09:55.936571 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ml5cs" Nov 25 09:10:23 crc kubenswrapper[4687]: I1125 09:10:23.844836 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:10:23 crc kubenswrapper[4687]: 
I1125 09:10:23.845384 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:10:23 crc kubenswrapper[4687]: I1125 09:10:23.845432 4687 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:10:23 crc kubenswrapper[4687]: I1125 09:10:23.846017 4687 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c575149652dbebb18c6b942edd7015230ac6861490511f4bb26940eebb3f97bc"} pod="openshift-machine-config-operator/machine-config-daemon-vcqct" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:10:23 crc kubenswrapper[4687]: I1125 09:10:23.846081 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" containerID="cri-o://c575149652dbebb18c6b942edd7015230ac6861490511f4bb26940eebb3f97bc" gracePeriod=600 Nov 25 09:10:25 crc kubenswrapper[4687]: I1125 09:10:25.109839 4687 generic.go:334] "Generic (PLEG): container finished" podID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerID="c575149652dbebb18c6b942edd7015230ac6861490511f4bb26940eebb3f97bc" exitCode=0 Nov 25 09:10:25 crc kubenswrapper[4687]: I1125 09:10:25.109931 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerDied","Data":"c575149652dbebb18c6b942edd7015230ac6861490511f4bb26940eebb3f97bc"} Nov 25 09:10:25 crc kubenswrapper[4687]: I1125 09:10:25.110460 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerStarted","Data":"b8e887c987f853d9c47a668160efcae36234097479afa1ebd1cdcaf883e881ed"} Nov 25 09:10:25 crc kubenswrapper[4687]: I1125 09:10:25.110493 4687 scope.go:117] "RemoveContainer" containerID="207008fd85e1acc3c5f96320c6a7207d27f66daa275d6f26cbe8de22f5a94174" Nov 25 09:12:26 crc kubenswrapper[4687]: I1125 09:12:26.075390 4687 scope.go:117] "RemoveContainer" containerID="3a2293f04b12d5c3b1c82bb4e57ad8d5a6ae17331129ef3244d477b16aa7e7e9" Nov 25 09:12:26 crc kubenswrapper[4687]: I1125 09:12:26.093780 4687 scope.go:117] "RemoveContainer" containerID="944c0f6b7c0757082b60b531c80fef2ca0d4fb9d53af605b456cb5477d50fb99" Nov 25 09:12:53 crc kubenswrapper[4687]: I1125 09:12:53.844944 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:12:53 crc kubenswrapper[4687]: I1125 09:12:53.845533 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" Nov 25 09:13:23 crc kubenswrapper[4687]: I1125 09:13:23.844465 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:13:23 crc kubenswrapper[4687]: I1125 09:13:23.845138 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:13:26 crc kubenswrapper[4687]: I1125 09:13:26.122010 4687 scope.go:117] "RemoveContainer" containerID="e8982d773651f2e62e7d96d6fdbc3f6385e9b3005f5a931561d67f35280ce968" Nov 25 09:13:26 crc kubenswrapper[4687]: I1125 09:13:26.137943 4687 scope.go:117] "RemoveContainer" containerID="3f52a0a7dbd55251e891ce827a37bfce4603522247be1ae37e304874b212f36c" Nov 25 09:13:53 crc kubenswrapper[4687]: I1125 09:13:53.844917 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:13:53 crc kubenswrapper[4687]: I1125 09:13:53.845549 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:13:53 crc kubenswrapper[4687]: I1125 09:13:53.845595 4687 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:13:53 crc kubenswrapper[4687]: I1125 09:13:53.846215 4687 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b8e887c987f853d9c47a668160efcae36234097479afa1ebd1cdcaf883e881ed"} pod="openshift-machine-config-operator/machine-config-daemon-vcqct" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:13:53 crc kubenswrapper[4687]: I1125 09:13:53.846276 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" containerID="cri-o://b8e887c987f853d9c47a668160efcae36234097479afa1ebd1cdcaf883e881ed" gracePeriod=600 Nov 25 09:13:54 crc kubenswrapper[4687]: I1125 09:13:54.375436 4687 generic.go:334] "Generic (PLEG): container finished" podID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerID="b8e887c987f853d9c47a668160efcae36234097479afa1ebd1cdcaf883e881ed" exitCode=0 Nov 25 09:13:54 crc kubenswrapper[4687]: I1125 09:13:54.375527 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerDied","Data":"b8e887c987f853d9c47a668160efcae36234097479afa1ebd1cdcaf883e881ed"} Nov 25 09:13:54 crc 
kubenswrapper[4687]: I1125 09:13:54.375770 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerStarted","Data":"0b6f3d727e83b272772a62a183625cfadd2675aa7e49dfb9d67bfd134f3394f8"} Nov 25 09:13:54 crc kubenswrapper[4687]: I1125 09:13:54.375786 4687 scope.go:117] "RemoveContainer" containerID="c575149652dbebb18c6b942edd7015230ac6861490511f4bb26940eebb3f97bc" Nov 25 09:15:00 crc kubenswrapper[4687]: I1125 09:15:00.179097 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401035-dkrvk"] Nov 25 09:15:00 crc kubenswrapper[4687]: E1125 09:15:00.181527 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5faad20b-1dd5-40df-8b0a-02890b547838" containerName="registry" Nov 25 09:15:00 crc kubenswrapper[4687]: I1125 09:15:00.181549 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="5faad20b-1dd5-40df-8b0a-02890b547838" containerName="registry" Nov 25 09:15:00 crc kubenswrapper[4687]: I1125 09:15:00.181694 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="5faad20b-1dd5-40df-8b0a-02890b547838" containerName="registry" Nov 25 09:15:00 crc kubenswrapper[4687]: I1125 09:15:00.182307 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-dkrvk" Nov 25 09:15:00 crc kubenswrapper[4687]: I1125 09:15:00.187327 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 09:15:00 crc kubenswrapper[4687]: I1125 09:15:00.187479 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 09:15:00 crc kubenswrapper[4687]: I1125 09:15:00.188307 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401035-dkrvk"] Nov 25 09:15:00 crc kubenswrapper[4687]: I1125 09:15:00.372327 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5883acaa-0d39-4b0d-ac93-b746fdb143a9-config-volume\") pod \"collect-profiles-29401035-dkrvk\" (UID: \"5883acaa-0d39-4b0d-ac93-b746fdb143a9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-dkrvk" Nov 25 09:15:00 crc kubenswrapper[4687]: I1125 09:15:00.372401 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5883acaa-0d39-4b0d-ac93-b746fdb143a9-secret-volume\") pod \"collect-profiles-29401035-dkrvk\" (UID: \"5883acaa-0d39-4b0d-ac93-b746fdb143a9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-dkrvk" Nov 25 09:15:00 crc kubenswrapper[4687]: I1125 09:15:00.372590 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ls4pw\" (UniqueName: \"kubernetes.io/projected/5883acaa-0d39-4b0d-ac93-b746fdb143a9-kube-api-access-ls4pw\") pod \"collect-profiles-29401035-dkrvk\" (UID: \"5883acaa-0d39-4b0d-ac93-b746fdb143a9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-dkrvk" Nov 25 09:15:00 crc kubenswrapper[4687]: I1125 09:15:00.474568 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-ls4pw\" (UniqueName: \"kubernetes.io/projected/5883acaa-0d39-4b0d-ac93-b746fdb143a9-kube-api-access-ls4pw\") pod \"collect-profiles-29401035-dkrvk\" (UID: \"5883acaa-0d39-4b0d-ac93-b746fdb143a9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-dkrvk" Nov 25 09:15:00 crc kubenswrapper[4687]: I1125 09:15:00.474655 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5883acaa-0d39-4b0d-ac93-b746fdb143a9-config-volume\") pod \"collect-profiles-29401035-dkrvk\" (UID: \"5883acaa-0d39-4b0d-ac93-b746fdb143a9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-dkrvk" Nov 25 09:15:00 crc kubenswrapper[4687]: I1125 09:15:00.474698 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5883acaa-0d39-4b0d-ac93-b746fdb143a9-secret-volume\") pod \"collect-profiles-29401035-dkrvk\" (UID: \"5883acaa-0d39-4b0d-ac93-b746fdb143a9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-dkrvk" Nov 25 09:15:00 crc kubenswrapper[4687]: I1125 09:15:00.475492 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5883acaa-0d39-4b0d-ac93-b746fdb143a9-config-volume\") pod \"collect-profiles-29401035-dkrvk\" (UID: \"5883acaa-0d39-4b0d-ac93-b746fdb143a9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-dkrvk" Nov 25 09:15:00 crc kubenswrapper[4687]: I1125 09:15:00.486733 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5883acaa-0d39-4b0d-ac93-b746fdb143a9-secret-volume\") pod \"collect-profiles-29401035-dkrvk\" (UID: \"5883acaa-0d39-4b0d-ac93-b746fdb143a9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-dkrvk" Nov 25 09:15:00 crc kubenswrapper[4687]: I1125 09:15:00.491786 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ls4pw\" (UniqueName: \"kubernetes.io/projected/5883acaa-0d39-4b0d-ac93-b746fdb143a9-kube-api-access-ls4pw\") pod \"collect-profiles-29401035-dkrvk\" (UID: \"5883acaa-0d39-4b0d-ac93-b746fdb143a9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-dkrvk" Nov 25 09:15:00 crc kubenswrapper[4687]: I1125 09:15:00.501992 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-dkrvk" Nov 25 09:15:00 crc kubenswrapper[4687]: I1125 09:15:00.683239 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401035-dkrvk"] Nov 25 09:15:00 crc kubenswrapper[4687]: I1125 09:15:00.765332 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-dkrvk" event={"ID":"5883acaa-0d39-4b0d-ac93-b746fdb143a9","Type":"ContainerStarted","Data":"20e31eaa8f91bb58225a11fa4038911eb72b933200992a3cfd57ab5c059a8fcb"} Nov 25 09:15:01 crc kubenswrapper[4687]: I1125 09:15:01.771245 4687 generic.go:334] "Generic (PLEG): container finished" podID="5883acaa-0d39-4b0d-ac93-b746fdb143a9" containerID="d24ee0c9a9df572297e7bfea8eeea9f97bf6370a766d31bfcb34e54342146249" exitCode=0 Nov 25 09:15:01 crc kubenswrapper[4687]: I1125 09:15:01.771309 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-dkrvk" event={"ID":"5883acaa-0d39-4b0d-ac93-b746fdb143a9","Type":"ContainerDied","Data":"d24ee0c9a9df572297e7bfea8eeea9f97bf6370a766d31bfcb34e54342146249"} Nov 25 09:15:03 crc kubenswrapper[4687]: I1125 09:15:03.057193 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-dkrvk" Nov 25 09:15:03 crc kubenswrapper[4687]: I1125 09:15:03.208937 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5883acaa-0d39-4b0d-ac93-b746fdb143a9-secret-volume\") pod \"5883acaa-0d39-4b0d-ac93-b746fdb143a9\" (UID: \"5883acaa-0d39-4b0d-ac93-b746fdb143a9\") " Nov 25 09:15:03 crc kubenswrapper[4687]: I1125 09:15:03.209007 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5883acaa-0d39-4b0d-ac93-b746fdb143a9-config-volume\") pod \"5883acaa-0d39-4b0d-ac93-b746fdb143a9\" (UID: \"5883acaa-0d39-4b0d-ac93-b746fdb143a9\") " Nov 25 09:15:03 crc kubenswrapper[4687]: I1125 09:15:03.209037 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ls4pw\" (UniqueName: \"kubernetes.io/projected/5883acaa-0d39-4b0d-ac93-b746fdb143a9-kube-api-access-ls4pw\") pod \"5883acaa-0d39-4b0d-ac93-b746fdb143a9\" (UID: \"5883acaa-0d39-4b0d-ac93-b746fdb143a9\") " Nov 25 09:15:03 crc kubenswrapper[4687]: I1125 09:15:03.210037 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5883acaa-0d39-4b0d-ac93-b746fdb143a9-config-volume" (OuterVolumeSpecName: "config-volume") pod "5883acaa-0d39-4b0d-ac93-b746fdb143a9" (UID: "5883acaa-0d39-4b0d-ac93-b746fdb143a9"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:15:03 crc kubenswrapper[4687]: I1125 09:15:03.219136 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5883acaa-0d39-4b0d-ac93-b746fdb143a9-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5883acaa-0d39-4b0d-ac93-b746fdb143a9" (UID: "5883acaa-0d39-4b0d-ac93-b746fdb143a9"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:15:03 crc kubenswrapper[4687]: I1125 09:15:03.221171 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5883acaa-0d39-4b0d-ac93-b746fdb143a9-kube-api-access-ls4pw" (OuterVolumeSpecName: "kube-api-access-ls4pw") pod "5883acaa-0d39-4b0d-ac93-b746fdb143a9" (UID: "5883acaa-0d39-4b0d-ac93-b746fdb143a9"). InnerVolumeSpecName "kube-api-access-ls4pw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:15:03 crc kubenswrapper[4687]: I1125 09:15:03.310578 4687 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5883acaa-0d39-4b0d-ac93-b746fdb143a9-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:03 crc kubenswrapper[4687]: I1125 09:15:03.310615 4687 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5883acaa-0d39-4b0d-ac93-b746fdb143a9-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:03 crc kubenswrapper[4687]: I1125 09:15:03.310628 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ls4pw\" (UniqueName: \"kubernetes.io/projected/5883acaa-0d39-4b0d-ac93-b746fdb143a9-kube-api-access-ls4pw\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:03 crc kubenswrapper[4687]: I1125 09:15:03.788944 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-dkrvk" event={"ID":"5883acaa-0d39-4b0d-ac93-b746fdb143a9","Type":"ContainerDied","Data":"20e31eaa8f91bb58225a11fa4038911eb72b933200992a3cfd57ab5c059a8fcb"} Nov 25 09:15:03 crc kubenswrapper[4687]: I1125 09:15:03.788995 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="20e31eaa8f91bb58225a11fa4038911eb72b933200992a3cfd57ab5c059a8fcb" Nov 25 09:15:03 crc kubenswrapper[4687]: I1125 09:15:03.789017 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-dkrvk" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.091780 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-gk2n9"] Nov 25 09:15:35 crc kubenswrapper[4687]: E1125 09:15:35.092645 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5883acaa-0d39-4b0d-ac93-b746fdb143a9" containerName="collect-profiles" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.092663 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="5883acaa-0d39-4b0d-ac93-b746fdb143a9" containerName="collect-profiles" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.092773 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="5883acaa-0d39-4b0d-ac93-b746fdb143a9" containerName="collect-profiles" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.093231 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-gk2n9" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.097492 4687 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-jn6vb" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.097955 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.116460 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-gk2n9"] Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.124271 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.142404 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-f22x8"] Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.143080 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-f22x8" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.147802 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-tc7r6"] Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.148454 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-tc7r6" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.154383 4687 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-cmq5l" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.154396 4687 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-2vsdr" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.168406 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-tc7r6"] Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.173655 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-f22x8"] Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.213154 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hrpg6\" (UniqueName: \"kubernetes.io/projected/643e6584-e9ad-4fe0-96a6-d1dda245fe76-kube-api-access-hrpg6\") pod \"cert-manager-cainjector-7f985d654d-gk2n9\" (UID: \"643e6584-e9ad-4fe0-96a6-d1dda245fe76\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-gk2n9" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.314389 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hrpg6\" (UniqueName: \"kubernetes.io/projected/643e6584-e9ad-4fe0-96a6-d1dda245fe76-kube-api-access-hrpg6\") pod \"cert-manager-cainjector-7f985d654d-gk2n9\" (UID: \"643e6584-e9ad-4fe0-96a6-d1dda245fe76\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-gk2n9" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.314478 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bcmq\" (UniqueName: \"kubernetes.io/projected/4506551b-78bf-4fe5-8b60-e9e34a53c8df-kube-api-access-7bcmq\") pod \"cert-manager-webhook-5655c58dd6-f22x8\" (UID: \"4506551b-78bf-4fe5-8b60-e9e34a53c8df\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-f22x8" Nov 25 
09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.314508 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vgtl\" (UniqueName: \"kubernetes.io/projected/0ff14130-8c48-4847-8fa6-1ba61b371244-kube-api-access-9vgtl\") pod \"cert-manager-5b446d88c5-tc7r6\" (UID: \"0ff14130-8c48-4847-8fa6-1ba61b371244\") " pod="cert-manager/cert-manager-5b446d88c5-tc7r6" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.339565 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hrpg6\" (UniqueName: \"kubernetes.io/projected/643e6584-e9ad-4fe0-96a6-d1dda245fe76-kube-api-access-hrpg6\") pod \"cert-manager-cainjector-7f985d654d-gk2n9\" (UID: \"643e6584-e9ad-4fe0-96a6-d1dda245fe76\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-gk2n9" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.413930 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-gk2n9" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.415647 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bcmq\" (UniqueName: \"kubernetes.io/projected/4506551b-78bf-4fe5-8b60-e9e34a53c8df-kube-api-access-7bcmq\") pod \"cert-manager-webhook-5655c58dd6-f22x8\" (UID: \"4506551b-78bf-4fe5-8b60-e9e34a53c8df\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-f22x8" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.415689 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vgtl\" (UniqueName: \"kubernetes.io/projected/0ff14130-8c48-4847-8fa6-1ba61b371244-kube-api-access-9vgtl\") pod \"cert-manager-5b446d88c5-tc7r6\" (UID: \"0ff14130-8c48-4847-8fa6-1ba61b371244\") " pod="cert-manager/cert-manager-5b446d88c5-tc7r6" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.433710 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vgtl\" (UniqueName: \"kubernetes.io/projected/0ff14130-8c48-4847-8fa6-1ba61b371244-kube-api-access-9vgtl\") pod \"cert-manager-5b446d88c5-tc7r6\" (UID: \"0ff14130-8c48-4847-8fa6-1ba61b371244\") " pod="cert-manager/cert-manager-5b446d88c5-tc7r6" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.435641 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bcmq\" (UniqueName: \"kubernetes.io/projected/4506551b-78bf-4fe5-8b60-e9e34a53c8df-kube-api-access-7bcmq\") pod \"cert-manager-webhook-5655c58dd6-f22x8\" (UID: \"4506551b-78bf-4fe5-8b60-e9e34a53c8df\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-f22x8" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.464124 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-f22x8" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.483489 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-tc7r6" Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.814227 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-gk2n9"] Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.823983 4687 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.884790 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-tc7r6"] Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.888399 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-f22x8"] Nov 25 09:15:35 crc kubenswrapper[4687]: W1125 09:15:35.897902 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0ff14130_8c48_4847_8fa6_1ba61b371244.slice/crio-53c111706b0d06a7e4deafe5694581f06eefd773cacf51dd01ac35df23449e31 WatchSource:0}: Error finding container 53c111706b0d06a7e4deafe5694581f06eefd773cacf51dd01ac35df23449e31: Status 404 returned error can't find the container with id 53c111706b0d06a7e4deafe5694581f06eefd773cacf51dd01ac35df23449e31 Nov 25 09:15:35 crc kubenswrapper[4687]: W1125 09:15:35.899979 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4506551b_78bf_4fe5_8b60_e9e34a53c8df.slice/crio-69fc05f329a79f9a9902e50a2900cb607aa187ce186b6f5bbcd6efb099cc1bd2 WatchSource:0}: Error finding container 69fc05f329a79f9a9902e50a2900cb607aa187ce186b6f5bbcd6efb099cc1bd2: Status 404 returned error can't find the container with id 69fc05f329a79f9a9902e50a2900cb607aa187ce186b6f5bbcd6efb099cc1bd2 Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.974419 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-tc7r6" event={"ID":"0ff14130-8c48-4847-8fa6-1ba61b371244","Type":"ContainerStarted","Data":"53c111706b0d06a7e4deafe5694581f06eefd773cacf51dd01ac35df23449e31"} Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.975705 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-f22x8" event={"ID":"4506551b-78bf-4fe5-8b60-e9e34a53c8df","Type":"ContainerStarted","Data":"69fc05f329a79f9a9902e50a2900cb607aa187ce186b6f5bbcd6efb099cc1bd2"} Nov 25 09:15:35 crc kubenswrapper[4687]: I1125 09:15:35.976923 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-gk2n9" event={"ID":"643e6584-e9ad-4fe0-96a6-d1dda245fe76","Type":"ContainerStarted","Data":"423beabeb8cec8560f5dc6284bceb331a58ef58ea43a754576ecd2ce3b7e2b66"} Nov 25 09:15:43 crc kubenswrapper[4687]: I1125 09:15:43.018148 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-f22x8" event={"ID":"4506551b-78bf-4fe5-8b60-e9e34a53c8df","Type":"ContainerStarted","Data":"990f8527491c5de3b65f08447121dde6233db2609af92d80897645f3754dfe6e"} Nov 25 09:15:43 crc kubenswrapper[4687]: I1125 09:15:43.018817 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-f22x8" Nov 25 09:15:43 crc kubenswrapper[4687]: I1125 09:15:43.021693 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-gk2n9" 
event={"ID":"643e6584-e9ad-4fe0-96a6-d1dda245fe76","Type":"ContainerStarted","Data":"ba6b8ec82da8cc25ab6682d8ac27e1ee7f544269e5b84988e140e017df2e1688"} Nov 25 09:15:43 crc kubenswrapper[4687]: I1125 09:15:43.023217 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-tc7r6" event={"ID":"0ff14130-8c48-4847-8fa6-1ba61b371244","Type":"ContainerStarted","Data":"92c950c0002ebce73d73ebdc52e93dfce506cf070682c4d19f8f9842dbe6634b"} Nov 25 09:15:43 crc kubenswrapper[4687]: I1125 09:15:43.038183 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-f22x8" podStartSLOduration=1.526528689 podStartE2EDuration="8.03815846s" podCreationTimestamp="2025-11-25 09:15:35 +0000 UTC" firstStartedPulling="2025-11-25 09:15:35.908566676 +0000 UTC m=+730.962206404" lastFinishedPulling="2025-11-25 09:15:42.420196447 +0000 UTC m=+737.473836175" observedRunningTime="2025-11-25 09:15:43.032873515 +0000 UTC m=+738.086513233" watchObservedRunningTime="2025-11-25 09:15:43.03815846 +0000 UTC m=+738.091798178" Nov 25 09:15:43 crc kubenswrapper[4687]: I1125 09:15:43.047814 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-tc7r6" podStartSLOduration=1.634704055 podStartE2EDuration="8.047797245s" podCreationTimestamp="2025-11-25 09:15:35 +0000 UTC" firstStartedPulling="2025-11-25 09:15:35.9000079 +0000 UTC m=+730.953647608" lastFinishedPulling="2025-11-25 09:15:42.31310108 +0000 UTC m=+737.366740798" observedRunningTime="2025-11-25 09:15:43.045977535 +0000 UTC m=+738.099617243" watchObservedRunningTime="2025-11-25 09:15:43.047797245 +0000 UTC m=+738.101436963" Nov 25 09:15:43 crc kubenswrapper[4687]: I1125 09:15:43.069452 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-gk2n9" podStartSLOduration=1.459546215 podStartE2EDuration="8.0694345s" podCreationTimestamp="2025-11-25 09:15:35 +0000 UTC" firstStartedPulling="2025-11-25 09:15:35.8236881 +0000 UTC m=+730.877327828" lastFinishedPulling="2025-11-25 09:15:42.433576365 +0000 UTC m=+737.487216113" observedRunningTime="2025-11-25 09:15:43.068189206 +0000 UTC m=+738.121828924" watchObservedRunningTime="2025-11-25 09:15:43.0694345 +0000 UTC m=+738.123074218" Nov 25 09:15:45 crc kubenswrapper[4687]: I1125 09:15:45.549975 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-p68hx"] Nov 25 09:15:45 crc kubenswrapper[4687]: I1125 09:15:45.550682 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovn-controller" containerID="cri-o://b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858" gracePeriod=30 Nov 25 09:15:45 crc kubenswrapper[4687]: I1125 09:15:45.550715 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="nbdb" containerID="cri-o://61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b" gracePeriod=30 Nov 25 09:15:45 crc kubenswrapper[4687]: I1125 09:15:45.550799 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovn-acl-logging" 
containerID="cri-o://d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d" gracePeriod=30 Nov 25 09:15:45 crc kubenswrapper[4687]: I1125 09:15:45.550791 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="sbdb" containerID="cri-o://6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682" gracePeriod=30 Nov 25 09:15:45 crc kubenswrapper[4687]: I1125 09:15:45.550830 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f" gracePeriod=30 Nov 25 09:15:45 crc kubenswrapper[4687]: I1125 09:15:45.550883 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="northd" containerID="cri-o://4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f" gracePeriod=30 Nov 25 09:15:45 crc kubenswrapper[4687]: I1125 09:15:45.550857 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="kube-rbac-proxy-node" containerID="cri-o://66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6" gracePeriod=30 Nov 25 09:15:45 crc kubenswrapper[4687]: I1125 09:15:45.608573 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovnkube-controller" containerID="cri-o://58d2e36fa26a4e84c8f9778e4cd44e111aa6feda866a77e47f9ba7ff7501bcd0" gracePeriod=30 Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.041189 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wlzrb_0e7c96e4-c7fa-466f-b0b6-495612ed71f8/kube-multus/2.log" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.042331 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wlzrb_0e7c96e4-c7fa-466f-b0b6-495612ed71f8/kube-multus/1.log" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.042420 4687 generic.go:334] "Generic (PLEG): container finished" podID="0e7c96e4-c7fa-466f-b0b6-495612ed71f8" containerID="c3581ad6cb85979bdd2e1342025b4b624bb9bebc32733ee23eb68b287b096ec6" exitCode=2 Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.042517 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wlzrb" event={"ID":"0e7c96e4-c7fa-466f-b0b6-495612ed71f8","Type":"ContainerDied","Data":"c3581ad6cb85979bdd2e1342025b4b624bb9bebc32733ee23eb68b287b096ec6"} Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.042556 4687 scope.go:117] "RemoveContainer" containerID="fd7297dc4adfd4a2e6ea288887f965d19bcf553824c5f7813dfa12052c60d189" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.043808 4687 scope.go:117] "RemoveContainer" containerID="c3581ad6cb85979bdd2e1342025b4b624bb9bebc32733ee23eb68b287b096ec6" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.048736 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-p68hx_d371271f-84c3-405c-b41f-604a06c1bb71/ovnkube-controller/3.log" Nov 25 09:15:46 crc 
kubenswrapper[4687]: I1125 09:15:46.055320 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-p68hx_d371271f-84c3-405c-b41f-604a06c1bb71/ovn-acl-logging/0.log" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.056378 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-p68hx_d371271f-84c3-405c-b41f-604a06c1bb71/ovn-controller/0.log" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.057067 4687 generic.go:334] "Generic (PLEG): container finished" podID="d371271f-84c3-405c-b41f-604a06c1bb71" containerID="58d2e36fa26a4e84c8f9778e4cd44e111aa6feda866a77e47f9ba7ff7501bcd0" exitCode=0 Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.057098 4687 generic.go:334] "Generic (PLEG): container finished" podID="d371271f-84c3-405c-b41f-604a06c1bb71" containerID="6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682" exitCode=0 Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.057132 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerDied","Data":"58d2e36fa26a4e84c8f9778e4cd44e111aa6feda866a77e47f9ba7ff7501bcd0"} Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.057192 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerDied","Data":"6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682"} Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.057204 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerDied","Data":"61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b"} Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.058250 4687 generic.go:334] "Generic (PLEG): container finished" podID="d371271f-84c3-405c-b41f-604a06c1bb71" containerID="61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b" exitCode=0 Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.058280 4687 generic.go:334] "Generic (PLEG): container finished" podID="d371271f-84c3-405c-b41f-604a06c1bb71" containerID="4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f" exitCode=0 Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.058291 4687 generic.go:334] "Generic (PLEG): container finished" podID="d371271f-84c3-405c-b41f-604a06c1bb71" containerID="cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f" exitCode=0 Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.058299 4687 generic.go:334] "Generic (PLEG): container finished" podID="d371271f-84c3-405c-b41f-604a06c1bb71" containerID="66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6" exitCode=0 Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.058307 4687 generic.go:334] "Generic (PLEG): container finished" podID="d371271f-84c3-405c-b41f-604a06c1bb71" containerID="d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d" exitCode=143 Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.058316 4687 generic.go:334] "Generic (PLEG): container finished" podID="d371271f-84c3-405c-b41f-604a06c1bb71" containerID="b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858" exitCode=143 Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.058337 4687 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerDied","Data":"4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f"} Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.058362 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerDied","Data":"cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f"} Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.058372 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerDied","Data":"66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6"} Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.058380 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerDied","Data":"d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d"} Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.058387 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerDied","Data":"b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858"} Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.083792 4687 scope.go:117] "RemoveContainer" containerID="b79d16f32d9cc4164ef4e877c4d4ce2c66ab956905e0969302bc5ed18ae02219" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.413243 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-p68hx_d371271f-84c3-405c-b41f-604a06c1bb71/ovn-acl-logging/0.log" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.414384 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-p68hx_d371271f-84c3-405c-b41f-604a06c1bb71/ovn-controller/0.log" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.414905 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.475962 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-jjzng"] Nov 25 09:15:46 crc kubenswrapper[4687]: E1125 09:15:46.476215 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476235 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 09:15:46 crc kubenswrapper[4687]: E1125 09:15:46.476312 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovnkube-controller" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476324 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovnkube-controller" Nov 25 09:15:46 crc kubenswrapper[4687]: E1125 09:15:46.476358 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="nbdb" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476368 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="nbdb" Nov 25 09:15:46 crc kubenswrapper[4687]: E1125 09:15:46.476381 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovnkube-controller" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476390 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovnkube-controller" Nov 25 09:15:46 crc kubenswrapper[4687]: E1125 09:15:46.476401 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="kube-rbac-proxy-node" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476411 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="kube-rbac-proxy-node" Nov 25 09:15:46 crc kubenswrapper[4687]: E1125 09:15:46.476426 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovnkube-controller" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476434 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovnkube-controller" Nov 25 09:15:46 crc kubenswrapper[4687]: E1125 09:15:46.476475 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovnkube-controller" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476483 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovnkube-controller" Nov 25 09:15:46 crc kubenswrapper[4687]: E1125 09:15:46.476493 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovn-acl-logging" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476504 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovn-acl-logging" Nov 25 09:15:46 crc kubenswrapper[4687]: E1125 09:15:46.476531 4687 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="sbdb" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476539 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="sbdb" Nov 25 09:15:46 crc kubenswrapper[4687]: E1125 09:15:46.476552 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovnkube-controller" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476560 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovnkube-controller" Nov 25 09:15:46 crc kubenswrapper[4687]: E1125 09:15:46.476573 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="kubecfg-setup" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476581 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="kubecfg-setup" Nov 25 09:15:46 crc kubenswrapper[4687]: E1125 09:15:46.476592 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="northd" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476600 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="northd" Nov 25 09:15:46 crc kubenswrapper[4687]: E1125 09:15:46.476611 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovn-controller" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476620 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovn-controller" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476774 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476785 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovnkube-controller" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476796 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="sbdb" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476815 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovnkube-controller" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476824 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="nbdb" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476837 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovnkube-controller" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476845 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="northd" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476859 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovn-acl-logging" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476868 4687 
memory_manager.go:354] "RemoveStaleState removing state" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovn-controller" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.476883 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="kube-rbac-proxy-node" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.477097 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovnkube-controller" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.477322 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" containerName="ovnkube-controller" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.480109 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.563620 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-kubelet\") pod \"d371271f-84c3-405c-b41f-604a06c1bb71\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.563770 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-run-ovn\") pod \"d371271f-84c3-405c-b41f-604a06c1bb71\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.563626 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "d371271f-84c3-405c-b41f-604a06c1bb71" (UID: "d371271f-84c3-405c-b41f-604a06c1bb71"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.563828 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-run-netns\") pod \"d371271f-84c3-405c-b41f-604a06c1bb71\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.563851 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-slash\") pod \"d371271f-84c3-405c-b41f-604a06c1bb71\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.563870 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-cni-bin\") pod \"d371271f-84c3-405c-b41f-604a06c1bb71\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.563846 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "d371271f-84c3-405c-b41f-604a06c1bb71" (UID: "d371271f-84c3-405c-b41f-604a06c1bb71"). InnerVolumeSpecName "run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.563913 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "d371271f-84c3-405c-b41f-604a06c1bb71" (UID: "d371271f-84c3-405c-b41f-604a06c1bb71"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.563921 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d371271f-84c3-405c-b41f-604a06c1bb71-env-overrides\") pod \"d371271f-84c3-405c-b41f-604a06c1bb71\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.563939 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-slash" (OuterVolumeSpecName: "host-slash") pod "d371271f-84c3-405c-b41f-604a06c1bb71" (UID: "d371271f-84c3-405c-b41f-604a06c1bb71"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.563954 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d371271f-84c3-405c-b41f-604a06c1bb71-ovnkube-script-lib\") pod \"d371271f-84c3-405c-b41f-604a06c1bb71\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.563962 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "d371271f-84c3-405c-b41f-604a06c1bb71" (UID: "d371271f-84c3-405c-b41f-604a06c1bb71"). InnerVolumeSpecName "host-cni-bin". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.563998 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-var-lib-cni-networks-ovn-kubernetes\") pod \"d371271f-84c3-405c-b41f-604a06c1bb71\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564033 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-node-log\") pod \"d371271f-84c3-405c-b41f-604a06c1bb71\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564064 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-var-lib-openvswitch\") pod \"d371271f-84c3-405c-b41f-604a06c1bb71\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564100 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-run-ovn-kubernetes\") pod \"d371271f-84c3-405c-b41f-604a06c1bb71\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564125 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-run-openvswitch\") pod \"d371271f-84c3-405c-b41f-604a06c1bb71\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564152 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-etc-openvswitch\") pod \"d371271f-84c3-405c-b41f-604a06c1bb71\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564177 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d371271f-84c3-405c-b41f-604a06c1bb71-ovnkube-config\") pod \"d371271f-84c3-405c-b41f-604a06c1bb71\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564197 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-run-systemd\") pod \"d371271f-84c3-405c-b41f-604a06c1bb71\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564223 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9jb5\" (UniqueName: \"kubernetes.io/projected/d371271f-84c3-405c-b41f-604a06c1bb71-kube-api-access-l9jb5\") pod \"d371271f-84c3-405c-b41f-604a06c1bb71\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564248 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/d371271f-84c3-405c-b41f-604a06c1bb71-ovn-node-metrics-cert\") pod \"d371271f-84c3-405c-b41f-604a06c1bb71\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564261 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-cni-netd\") pod \"d371271f-84c3-405c-b41f-604a06c1bb71\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564279 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-log-socket\") pod \"d371271f-84c3-405c-b41f-604a06c1bb71\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564298 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-systemd-units\") pod \"d371271f-84c3-405c-b41f-604a06c1bb71\" (UID: \"d371271f-84c3-405c-b41f-604a06c1bb71\") " Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564329 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d371271f-84c3-405c-b41f-604a06c1bb71-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "d371271f-84c3-405c-b41f-604a06c1bb71" (UID: "d371271f-84c3-405c-b41f-604a06c1bb71"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564358 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "d371271f-84c3-405c-b41f-604a06c1bb71" (UID: "d371271f-84c3-405c-b41f-604a06c1bb71"). InnerVolumeSpecName "etc-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564426 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-host-run-netns\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564456 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-host-run-ovn-kubernetes\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564488 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-etc-openvswitch\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564540 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-log-socket\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564567 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-node-log\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564581 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-host-cni-netd\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564625 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-run-systemd\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564662 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgkp4\" (UniqueName: \"kubernetes.io/projected/977727d1-8b1a-4422-8677-c269ee767e46-kube-api-access-tgkp4\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564678 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-var-lib-openvswitch\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564692 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d371271f-84c3-405c-b41f-604a06c1bb71-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "d371271f-84c3-405c-b41f-604a06c1bb71" (UID: "d371271f-84c3-405c-b41f-604a06c1bb71"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564722 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "d371271f-84c3-405c-b41f-604a06c1bb71" (UID: "d371271f-84c3-405c-b41f-604a06c1bb71"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564738 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/977727d1-8b1a-4422-8677-c269ee767e46-ovn-node-metrics-cert\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564751 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-node-log" (OuterVolumeSpecName: "node-log") pod "d371271f-84c3-405c-b41f-604a06c1bb71" (UID: "d371271f-84c3-405c-b41f-604a06c1bb71"). InnerVolumeSpecName "node-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564756 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-systemd-units\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564774 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564793 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/977727d1-8b1a-4422-8677-c269ee767e46-env-overrides\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564816 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/977727d1-8b1a-4422-8677-c269ee767e46-ovnkube-script-lib\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564847 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/977727d1-8b1a-4422-8677-c269ee767e46-ovnkube-config\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564870 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-run-ovn\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564885 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-host-slash\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564899 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-host-kubelet\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564920 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-run-openvswitch\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564934 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-host-cni-bin\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564971 4687 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564981 4687 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564989 4687 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564998 4687 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.565006 4687 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.565015 4687 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-slash\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.565023 4687 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d371271f-84c3-405c-b41f-604a06c1bb71-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.565031 4687 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d371271f-84c3-405c-b41f-604a06c1bb71-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.565040 4687 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.565048 4687 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-node-log\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564774 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-var-lib-openvswitch" (OuterVolumeSpecName: 
"var-lib-openvswitch") pod "d371271f-84c3-405c-b41f-604a06c1bb71" (UID: "d371271f-84c3-405c-b41f-604a06c1bb71"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564795 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "d371271f-84c3-405c-b41f-604a06c1bb71" (UID: "d371271f-84c3-405c-b41f-604a06c1bb71"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.564811 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "d371271f-84c3-405c-b41f-604a06c1bb71" (UID: "d371271f-84c3-405c-b41f-604a06c1bb71"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.565169 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d371271f-84c3-405c-b41f-604a06c1bb71-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "d371271f-84c3-405c-b41f-604a06c1bb71" (UID: "d371271f-84c3-405c-b41f-604a06c1bb71"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.565188 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-log-socket" (OuterVolumeSpecName: "log-socket") pod "d371271f-84c3-405c-b41f-604a06c1bb71" (UID: "d371271f-84c3-405c-b41f-604a06c1bb71"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.565202 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "d371271f-84c3-405c-b41f-604a06c1bb71" (UID: "d371271f-84c3-405c-b41f-604a06c1bb71"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.565931 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "d371271f-84c3-405c-b41f-604a06c1bb71" (UID: "d371271f-84c3-405c-b41f-604a06c1bb71"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.571684 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d371271f-84c3-405c-b41f-604a06c1bb71-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "d371271f-84c3-405c-b41f-604a06c1bb71" (UID: "d371271f-84c3-405c-b41f-604a06c1bb71"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.571953 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d371271f-84c3-405c-b41f-604a06c1bb71-kube-api-access-l9jb5" (OuterVolumeSpecName: "kube-api-access-l9jb5") pod "d371271f-84c3-405c-b41f-604a06c1bb71" (UID: "d371271f-84c3-405c-b41f-604a06c1bb71"). InnerVolumeSpecName "kube-api-access-l9jb5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.578495 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "d371271f-84c3-405c-b41f-604a06c1bb71" (UID: "d371271f-84c3-405c-b41f-604a06c1bb71"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.666440 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/977727d1-8b1a-4422-8677-c269ee767e46-ovn-node-metrics-cert\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.666567 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-systemd-units\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.666601 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/977727d1-8b1a-4422-8677-c269ee767e46-env-overrides\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.666632 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.666669 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/977727d1-8b1a-4422-8677-c269ee767e46-ovnkube-script-lib\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.666705 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/977727d1-8b1a-4422-8677-c269ee767e46-ovnkube-config\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.666728 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: 
\"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-systemd-units\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.666738 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-host-slash\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.666779 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-host-slash\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.666813 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-run-ovn\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.666845 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-host-kubelet\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.666880 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-run-openvswitch\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.666908 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-host-cni-bin\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.666939 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-host-run-netns\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.666975 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-host-run-ovn-kubernetes\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667011 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-etc-openvswitch\") pod \"ovnkube-node-jjzng\" (UID: 
\"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667051 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-log-socket\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667088 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-node-log\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667115 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-host-cni-netd\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667161 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-run-systemd\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667206 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tgkp4\" (UniqueName: \"kubernetes.io/projected/977727d1-8b1a-4422-8677-c269ee767e46-kube-api-access-tgkp4\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667237 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-var-lib-openvswitch\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667313 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l9jb5\" (UniqueName: \"kubernetes.io/projected/d371271f-84c3-405c-b41f-604a06c1bb71-kube-api-access-l9jb5\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667335 4687 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d371271f-84c3-405c-b41f-604a06c1bb71-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667392 4687 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667410 4687 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-log-socket\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667428 4687 
reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667472 4687 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667491 4687 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667529 4687 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667546 4687 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d371271f-84c3-405c-b41f-604a06c1bb71-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667562 4687 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d371271f-84c3-405c-b41f-604a06c1bb71-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667610 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-var-lib-openvswitch\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667658 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-run-ovn\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667752 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-host-kubelet\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667793 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-run-openvswitch\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667791 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/977727d1-8b1a-4422-8677-c269ee767e46-env-overrides\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667831 4687 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667853 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-host-cni-bin\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667894 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-host-run-netns\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667936 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-host-run-ovn-kubernetes\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.667975 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-etc-openvswitch\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.668016 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-log-socket\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.668054 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-node-log\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.668092 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-host-cni-netd\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.668130 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/977727d1-8b1a-4422-8677-c269ee767e46-run-systemd\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.668677 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: 
\"kubernetes.io/configmap/977727d1-8b1a-4422-8677-c269ee767e46-ovnkube-script-lib\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.669429 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/977727d1-8b1a-4422-8677-c269ee767e46-ovnkube-config\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.669769 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/977727d1-8b1a-4422-8677-c269ee767e46-ovn-node-metrics-cert\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.699981 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tgkp4\" (UniqueName: \"kubernetes.io/projected/977727d1-8b1a-4422-8677-c269ee767e46-kube-api-access-tgkp4\") pod \"ovnkube-node-jjzng\" (UID: \"977727d1-8b1a-4422-8677-c269ee767e46\") " pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: I1125 09:15:46.793628 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:46 crc kubenswrapper[4687]: W1125 09:15:46.828139 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod977727d1_8b1a_4422_8677_c269ee767e46.slice/crio-a4834a7e51a1da9b5101efbbefbde1a6e8b4efbfe62e43eb8217914f20902c3d WatchSource:0}: Error finding container a4834a7e51a1da9b5101efbbefbde1a6e8b4efbfe62e43eb8217914f20902c3d: Status 404 returned error can't find the container with id a4834a7e51a1da9b5101efbbefbde1a6e8b4efbfe62e43eb8217914f20902c3d Nov 25 09:15:47 crc kubenswrapper[4687]: I1125 09:15:47.069471 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-p68hx_d371271f-84c3-405c-b41f-604a06c1bb71/ovn-acl-logging/0.log" Nov 25 09:15:47 crc kubenswrapper[4687]: I1125 09:15:47.070149 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-p68hx_d371271f-84c3-405c-b41f-604a06c1bb71/ovn-controller/0.log" Nov 25 09:15:47 crc kubenswrapper[4687]: I1125 09:15:47.070687 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" Nov 25 09:15:47 crc kubenswrapper[4687]: I1125 09:15:47.070951 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p68hx" event={"ID":"d371271f-84c3-405c-b41f-604a06c1bb71","Type":"ContainerDied","Data":"45c46ddac61f3279e4ffbb7538198ae91eb749e696cf641bc4651d88a88977f6"} Nov 25 09:15:47 crc kubenswrapper[4687]: I1125 09:15:47.071007 4687 scope.go:117] "RemoveContainer" containerID="58d2e36fa26a4e84c8f9778e4cd44e111aa6feda866a77e47f9ba7ff7501bcd0" Nov 25 09:15:47 crc kubenswrapper[4687]: I1125 09:15:47.079712 4687 generic.go:334] "Generic (PLEG): container finished" podID="977727d1-8b1a-4422-8677-c269ee767e46" containerID="2a96b794b253de7d8971d6b5f89f09d417fa8dfd951a14c1b46f777e5914735f" exitCode=0 Nov 25 09:15:47 crc kubenswrapper[4687]: I1125 09:15:47.079802 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" event={"ID":"977727d1-8b1a-4422-8677-c269ee767e46","Type":"ContainerDied","Data":"2a96b794b253de7d8971d6b5f89f09d417fa8dfd951a14c1b46f777e5914735f"} Nov 25 09:15:47 crc kubenswrapper[4687]: I1125 09:15:47.079861 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" event={"ID":"977727d1-8b1a-4422-8677-c269ee767e46","Type":"ContainerStarted","Data":"a4834a7e51a1da9b5101efbbefbde1a6e8b4efbfe62e43eb8217914f20902c3d"} Nov 25 09:15:47 crc kubenswrapper[4687]: I1125 09:15:47.083045 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wlzrb_0e7c96e4-c7fa-466f-b0b6-495612ed71f8/kube-multus/2.log" Nov 25 09:15:47 crc kubenswrapper[4687]: I1125 09:15:47.083102 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wlzrb" event={"ID":"0e7c96e4-c7fa-466f-b0b6-495612ed71f8","Type":"ContainerStarted","Data":"d9bfa820028eeed248a450646f88cfdcf91405352fcd8968d1958df4584d4a4c"} Nov 25 09:15:47 crc kubenswrapper[4687]: I1125 09:15:47.095390 4687 scope.go:117] "RemoveContainer" containerID="6a41d8bcccda9732c7fb836370bc90756d05bbf6f00edb5a2734952fa0407682" Nov 25 09:15:47 crc kubenswrapper[4687]: I1125 09:15:47.115016 4687 scope.go:117] "RemoveContainer" containerID="61e8c8b4333cc9fc5b95a960a6c4722f0c0b2fddc26d443b0fdd61e6cabea59b" Nov 25 09:15:47 crc kubenswrapper[4687]: I1125 09:15:47.122754 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-p68hx"] Nov 25 09:15:47 crc kubenswrapper[4687]: I1125 09:15:47.129211 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-p68hx"] Nov 25 09:15:47 crc kubenswrapper[4687]: I1125 09:15:47.136401 4687 scope.go:117] "RemoveContainer" containerID="4ff7b55ac4c8e3354deae80ce9adaf224e06f60fe9462ac4afab26bbd970205f" Nov 25 09:15:47 crc kubenswrapper[4687]: I1125 09:15:47.156693 4687 scope.go:117] "RemoveContainer" containerID="cf78a8b7d4c70d5561b625bef695fd051d6b2e5e399abcbccd3346d105d22b0f" Nov 25 09:15:47 crc kubenswrapper[4687]: I1125 09:15:47.189312 4687 scope.go:117] "RemoveContainer" containerID="66f53d363790940e87b9a15711ffe328b6b7f3f01d62fe1f3063860f871559e6" Nov 25 09:15:47 crc kubenswrapper[4687]: I1125 09:15:47.208746 4687 scope.go:117] "RemoveContainer" containerID="d9d9a3c93f7c8996dd259268e221af5faa723871e3b7f27f347f934fdf677a8d" Nov 25 09:15:47 crc kubenswrapper[4687]: I1125 09:15:47.232394 4687 scope.go:117] "RemoveContainer" 
containerID="b08d3cda9d49ddf0ad02d7f2971d73970202bf78093709122398fe6d13a39858" Nov 25 09:15:47 crc kubenswrapper[4687]: I1125 09:15:47.252050 4687 scope.go:117] "RemoveContainer" containerID="134e9872c6854aa3d866f1f99e0ddccfd116e8e0af02957e833b0a4a3d754c05" Nov 25 09:15:47 crc kubenswrapper[4687]: I1125 09:15:47.746180 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d371271f-84c3-405c-b41f-604a06c1bb71" path="/var/lib/kubelet/pods/d371271f-84c3-405c-b41f-604a06c1bb71/volumes" Nov 25 09:15:48 crc kubenswrapper[4687]: I1125 09:15:48.093216 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" event={"ID":"977727d1-8b1a-4422-8677-c269ee767e46","Type":"ContainerStarted","Data":"6c88738806a49bbd7743541c9dc4851763124d1bc65cec264adf79e1cedb09e2"} Nov 25 09:15:48 crc kubenswrapper[4687]: I1125 09:15:48.093266 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" event={"ID":"977727d1-8b1a-4422-8677-c269ee767e46","Type":"ContainerStarted","Data":"ea765beb86e5a188bc2422af7181efe5f4f08a50f0f570867ab4a6cee6f43a14"} Nov 25 09:15:48 crc kubenswrapper[4687]: I1125 09:15:48.093281 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" event={"ID":"977727d1-8b1a-4422-8677-c269ee767e46","Type":"ContainerStarted","Data":"86911e303be74f2981b01c761eddb0d900e2405ab065c1a055d6d3d1c5ce3c1d"} Nov 25 09:15:48 crc kubenswrapper[4687]: I1125 09:15:48.093293 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" event={"ID":"977727d1-8b1a-4422-8677-c269ee767e46","Type":"ContainerStarted","Data":"4fc62e1c79b885d2c786fcd26234f31a985e0c2ab54f8975286291df024ce05d"} Nov 25 09:15:48 crc kubenswrapper[4687]: I1125 09:15:48.093304 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" event={"ID":"977727d1-8b1a-4422-8677-c269ee767e46","Type":"ContainerStarted","Data":"5277f9c9c9f6ce0b183740cf668385ab90a567dca8f4786ab004d4c43936b6fb"} Nov 25 09:15:48 crc kubenswrapper[4687]: I1125 09:15:48.093318 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" event={"ID":"977727d1-8b1a-4422-8677-c269ee767e46","Type":"ContainerStarted","Data":"6bd506743859665339324d247fd111c898873e8575fc05ebfec0fbc198e673d2"} Nov 25 09:15:50 crc kubenswrapper[4687]: I1125 09:15:50.108724 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" event={"ID":"977727d1-8b1a-4422-8677-c269ee767e46","Type":"ContainerStarted","Data":"e32c84d0fd938c53bc62435716988906b049434b8460072978c08e2f6cebde3d"} Nov 25 09:15:50 crc kubenswrapper[4687]: I1125 09:15:50.466470 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-f22x8" Nov 25 09:15:53 crc kubenswrapper[4687]: I1125 09:15:53.130159 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" event={"ID":"977727d1-8b1a-4422-8677-c269ee767e46","Type":"ContainerStarted","Data":"e4c08ca68d341d23bc9e551d79c99450d8d6e0a43553b442f1c53018b6dad291"} Nov 25 09:15:53 crc kubenswrapper[4687]: I1125 09:15:53.131588 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:53 crc kubenswrapper[4687]: I1125 09:15:53.131613 4687 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:53 crc kubenswrapper[4687]: I1125 09:15:53.161321 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" podStartSLOduration=7.161302114 podStartE2EDuration="7.161302114s" podCreationTimestamp="2025-11-25 09:15:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:15:53.158374343 +0000 UTC m=+748.212014081" watchObservedRunningTime="2025-11-25 09:15:53.161302114 +0000 UTC m=+748.214941842" Nov 25 09:15:53 crc kubenswrapper[4687]: I1125 09:15:53.166567 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:54 crc kubenswrapper[4687]: I1125 09:15:54.140758 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:15:54 crc kubenswrapper[4687]: I1125 09:15:54.174561 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:16:08 crc kubenswrapper[4687]: I1125 09:16:08.961245 4687 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 25 09:16:14 crc kubenswrapper[4687]: I1125 09:16:14.167306 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-98kfz"] Nov 25 09:16:14 crc kubenswrapper[4687]: I1125 09:16:14.169027 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-98kfz" Nov 25 09:16:14 crc kubenswrapper[4687]: I1125 09:16:14.184155 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-98kfz"] Nov 25 09:16:14 crc kubenswrapper[4687]: I1125 09:16:14.317620 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0a52ad3-45c4-4a3b-874a-6066b601f6c6-utilities\") pod \"redhat-operators-98kfz\" (UID: \"c0a52ad3-45c4-4a3b-874a-6066b601f6c6\") " pod="openshift-marketplace/redhat-operators-98kfz" Nov 25 09:16:14 crc kubenswrapper[4687]: I1125 09:16:14.317683 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0a52ad3-45c4-4a3b-874a-6066b601f6c6-catalog-content\") pod \"redhat-operators-98kfz\" (UID: \"c0a52ad3-45c4-4a3b-874a-6066b601f6c6\") " pod="openshift-marketplace/redhat-operators-98kfz" Nov 25 09:16:14 crc kubenswrapper[4687]: I1125 09:16:14.317744 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q85g8\" (UniqueName: \"kubernetes.io/projected/c0a52ad3-45c4-4a3b-874a-6066b601f6c6-kube-api-access-q85g8\") pod \"redhat-operators-98kfz\" (UID: \"c0a52ad3-45c4-4a3b-874a-6066b601f6c6\") " pod="openshift-marketplace/redhat-operators-98kfz" Nov 25 09:16:14 crc kubenswrapper[4687]: I1125 09:16:14.419279 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0a52ad3-45c4-4a3b-874a-6066b601f6c6-utilities\") pod \"redhat-operators-98kfz\" (UID: \"c0a52ad3-45c4-4a3b-874a-6066b601f6c6\") " pod="openshift-marketplace/redhat-operators-98kfz" Nov 25 09:16:14 
crc kubenswrapper[4687]: I1125 09:16:14.419372 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0a52ad3-45c4-4a3b-874a-6066b601f6c6-catalog-content\") pod \"redhat-operators-98kfz\" (UID: \"c0a52ad3-45c4-4a3b-874a-6066b601f6c6\") " pod="openshift-marketplace/redhat-operators-98kfz" Nov 25 09:16:14 crc kubenswrapper[4687]: I1125 09:16:14.419450 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q85g8\" (UniqueName: \"kubernetes.io/projected/c0a52ad3-45c4-4a3b-874a-6066b601f6c6-kube-api-access-q85g8\") pod \"redhat-operators-98kfz\" (UID: \"c0a52ad3-45c4-4a3b-874a-6066b601f6c6\") " pod="openshift-marketplace/redhat-operators-98kfz" Nov 25 09:16:14 crc kubenswrapper[4687]: I1125 09:16:14.420051 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0a52ad3-45c4-4a3b-874a-6066b601f6c6-utilities\") pod \"redhat-operators-98kfz\" (UID: \"c0a52ad3-45c4-4a3b-874a-6066b601f6c6\") " pod="openshift-marketplace/redhat-operators-98kfz" Nov 25 09:16:14 crc kubenswrapper[4687]: I1125 09:16:14.420735 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0a52ad3-45c4-4a3b-874a-6066b601f6c6-catalog-content\") pod \"redhat-operators-98kfz\" (UID: \"c0a52ad3-45c4-4a3b-874a-6066b601f6c6\") " pod="openshift-marketplace/redhat-operators-98kfz" Nov 25 09:16:14 crc kubenswrapper[4687]: I1125 09:16:14.440752 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q85g8\" (UniqueName: \"kubernetes.io/projected/c0a52ad3-45c4-4a3b-874a-6066b601f6c6-kube-api-access-q85g8\") pod \"redhat-operators-98kfz\" (UID: \"c0a52ad3-45c4-4a3b-874a-6066b601f6c6\") " pod="openshift-marketplace/redhat-operators-98kfz" Nov 25 09:16:14 crc kubenswrapper[4687]: I1125 09:16:14.488324 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-98kfz" Nov 25 09:16:14 crc kubenswrapper[4687]: I1125 09:16:14.761105 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-98kfz"] Nov 25 09:16:15 crc kubenswrapper[4687]: I1125 09:16:15.256565 4687 generic.go:334] "Generic (PLEG): container finished" podID="c0a52ad3-45c4-4a3b-874a-6066b601f6c6" containerID="deb95ac6083933966b8d419c782a000dee0b31cfbbf2d9accbd02b5d0475ed9a" exitCode=0 Nov 25 09:16:15 crc kubenswrapper[4687]: I1125 09:16:15.256687 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-98kfz" event={"ID":"c0a52ad3-45c4-4a3b-874a-6066b601f6c6","Type":"ContainerDied","Data":"deb95ac6083933966b8d419c782a000dee0b31cfbbf2d9accbd02b5d0475ed9a"} Nov 25 09:16:15 crc kubenswrapper[4687]: I1125 09:16:15.257131 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-98kfz" event={"ID":"c0a52ad3-45c4-4a3b-874a-6066b601f6c6","Type":"ContainerStarted","Data":"1f98987c783dc5b1d8d700d73233314a8c61b35c8843251f928b7f0211bb50d0"} Nov 25 09:16:16 crc kubenswrapper[4687]: I1125 09:16:16.822159 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-jjzng" Nov 25 09:16:17 crc kubenswrapper[4687]: I1125 09:16:17.272329 4687 generic.go:334] "Generic (PLEG): container finished" podID="c0a52ad3-45c4-4a3b-874a-6066b601f6c6" containerID="151cc06cdd390798ce2a62d8f5af5056ab8dbefc0f9a94ffc3534a7067578977" exitCode=0 Nov 25 09:16:17 crc kubenswrapper[4687]: I1125 09:16:17.272379 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-98kfz" event={"ID":"c0a52ad3-45c4-4a3b-874a-6066b601f6c6","Type":"ContainerDied","Data":"151cc06cdd390798ce2a62d8f5af5056ab8dbefc0f9a94ffc3534a7067578977"} Nov 25 09:16:18 crc kubenswrapper[4687]: I1125 09:16:18.287091 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-98kfz" event={"ID":"c0a52ad3-45c4-4a3b-874a-6066b601f6c6","Type":"ContainerStarted","Data":"fd973bf967b55a1a3dec41fb1646f62425f3d4971f75001f076ac80e5e666552"} Nov 25 09:16:18 crc kubenswrapper[4687]: I1125 09:16:18.316955 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-98kfz" podStartSLOduration=1.558359556 podStartE2EDuration="4.316902139s" podCreationTimestamp="2025-11-25 09:16:14 +0000 UTC" firstStartedPulling="2025-11-25 09:16:15.261155059 +0000 UTC m=+770.314794777" lastFinishedPulling="2025-11-25 09:16:18.019697642 +0000 UTC m=+773.073337360" observedRunningTime="2025-11-25 09:16:18.305794783 +0000 UTC m=+773.359434541" watchObservedRunningTime="2025-11-25 09:16:18.316902139 +0000 UTC m=+773.370541957" Nov 25 09:16:23 crc kubenswrapper[4687]: I1125 09:16:23.844971 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:16:23 crc kubenswrapper[4687]: I1125 09:16:23.845829 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" Nov 25 09:16:24 crc kubenswrapper[4687]: I1125 09:16:24.489024 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-98kfz" Nov 25 09:16:24 crc kubenswrapper[4687]: I1125 09:16:24.489077 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-98kfz" Nov 25 09:16:24 crc kubenswrapper[4687]: I1125 09:16:24.569574 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-98kfz" Nov 25 09:16:24 crc kubenswrapper[4687]: I1125 09:16:24.828406 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9p2n5"] Nov 25 09:16:24 crc kubenswrapper[4687]: I1125 09:16:24.830744 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9p2n5" Nov 25 09:16:24 crc kubenswrapper[4687]: I1125 09:16:24.840707 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9p2n5"] Nov 25 09:16:24 crc kubenswrapper[4687]: I1125 09:16:24.963194 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfls9\" (UniqueName: \"kubernetes.io/projected/75e9a46f-a9b6-4a91-b1c4-7e866446b2c9-kube-api-access-zfls9\") pod \"community-operators-9p2n5\" (UID: \"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9\") " pod="openshift-marketplace/community-operators-9p2n5" Nov 25 09:16:24 crc kubenswrapper[4687]: I1125 09:16:24.964411 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75e9a46f-a9b6-4a91-b1c4-7e866446b2c9-utilities\") pod \"community-operators-9p2n5\" (UID: \"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9\") " pod="openshift-marketplace/community-operators-9p2n5" Nov 25 09:16:24 crc kubenswrapper[4687]: I1125 09:16:24.964614 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75e9a46f-a9b6-4a91-b1c4-7e866446b2c9-catalog-content\") pod \"community-operators-9p2n5\" (UID: \"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9\") " pod="openshift-marketplace/community-operators-9p2n5" Nov 25 09:16:25 crc kubenswrapper[4687]: I1125 09:16:25.065277 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75e9a46f-a9b6-4a91-b1c4-7e866446b2c9-utilities\") pod \"community-operators-9p2n5\" (UID: \"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9\") " pod="openshift-marketplace/community-operators-9p2n5" Nov 25 09:16:25 crc kubenswrapper[4687]: I1125 09:16:25.065364 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75e9a46f-a9b6-4a91-b1c4-7e866446b2c9-catalog-content\") pod \"community-operators-9p2n5\" (UID: \"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9\") " pod="openshift-marketplace/community-operators-9p2n5" Nov 25 09:16:25 crc kubenswrapper[4687]: I1125 09:16:25.065392 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfls9\" (UniqueName: \"kubernetes.io/projected/75e9a46f-a9b6-4a91-b1c4-7e866446b2c9-kube-api-access-zfls9\") pod \"community-operators-9p2n5\" (UID: \"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9\") " 
pod="openshift-marketplace/community-operators-9p2n5" Nov 25 09:16:25 crc kubenswrapper[4687]: I1125 09:16:25.065828 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75e9a46f-a9b6-4a91-b1c4-7e866446b2c9-utilities\") pod \"community-operators-9p2n5\" (UID: \"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9\") " pod="openshift-marketplace/community-operators-9p2n5" Nov 25 09:16:25 crc kubenswrapper[4687]: I1125 09:16:25.065959 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75e9a46f-a9b6-4a91-b1c4-7e866446b2c9-catalog-content\") pod \"community-operators-9p2n5\" (UID: \"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9\") " pod="openshift-marketplace/community-operators-9p2n5" Nov 25 09:16:25 crc kubenswrapper[4687]: I1125 09:16:25.085746 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfls9\" (UniqueName: \"kubernetes.io/projected/75e9a46f-a9b6-4a91-b1c4-7e866446b2c9-kube-api-access-zfls9\") pod \"community-operators-9p2n5\" (UID: \"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9\") " pod="openshift-marketplace/community-operators-9p2n5" Nov 25 09:16:25 crc kubenswrapper[4687]: I1125 09:16:25.152513 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9p2n5" Nov 25 09:16:25 crc kubenswrapper[4687]: I1125 09:16:25.371779 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-98kfz" Nov 25 09:16:25 crc kubenswrapper[4687]: I1125 09:16:25.413095 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9p2n5"] Nov 25 09:16:26 crc kubenswrapper[4687]: I1125 09:16:26.333415 4687 generic.go:334] "Generic (PLEG): container finished" podID="75e9a46f-a9b6-4a91-b1c4-7e866446b2c9" containerID="1d67f1cf45cc939af465a3160892e10014fbb551c0517fd882d3bb7920fbb5ce" exitCode=0 Nov 25 09:16:26 crc kubenswrapper[4687]: I1125 09:16:26.333464 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9p2n5" event={"ID":"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9","Type":"ContainerDied","Data":"1d67f1cf45cc939af465a3160892e10014fbb551c0517fd882d3bb7920fbb5ce"} Nov 25 09:16:26 crc kubenswrapper[4687]: I1125 09:16:26.334282 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9p2n5" event={"ID":"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9","Type":"ContainerStarted","Data":"059b7147dfb766b7196486489007d63368913d9f5392e6dac794d998fff16ea4"} Nov 25 09:16:27 crc kubenswrapper[4687]: I1125 09:16:27.609749 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-98kfz"] Nov 25 09:16:27 crc kubenswrapper[4687]: I1125 09:16:27.610354 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-98kfz" podUID="c0a52ad3-45c4-4a3b-874a-6066b601f6c6" containerName="registry-server" containerID="cri-o://fd973bf967b55a1a3dec41fb1646f62425f3d4971f75001f076ac80e5e666552" gracePeriod=2 Nov 25 09:16:28 crc kubenswrapper[4687]: I1125 09:16:28.346590 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9p2n5" event={"ID":"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9","Type":"ContainerStarted","Data":"894be2b255fa163fcdc84abaee0fe6731ffe2a60b1d93d0526779ee315784277"} Nov 25 09:16:29 
crc kubenswrapper[4687]: I1125 09:16:29.355035 4687 generic.go:334] "Generic (PLEG): container finished" podID="c0a52ad3-45c4-4a3b-874a-6066b601f6c6" containerID="fd973bf967b55a1a3dec41fb1646f62425f3d4971f75001f076ac80e5e666552" exitCode=0 Nov 25 09:16:29 crc kubenswrapper[4687]: I1125 09:16:29.355111 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-98kfz" event={"ID":"c0a52ad3-45c4-4a3b-874a-6066b601f6c6","Type":"ContainerDied","Data":"fd973bf967b55a1a3dec41fb1646f62425f3d4971f75001f076ac80e5e666552"} Nov 25 09:16:29 crc kubenswrapper[4687]: I1125 09:16:29.357469 4687 generic.go:334] "Generic (PLEG): container finished" podID="75e9a46f-a9b6-4a91-b1c4-7e866446b2c9" containerID="894be2b255fa163fcdc84abaee0fe6731ffe2a60b1d93d0526779ee315784277" exitCode=0 Nov 25 09:16:29 crc kubenswrapper[4687]: I1125 09:16:29.357536 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9p2n5" event={"ID":"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9","Type":"ContainerDied","Data":"894be2b255fa163fcdc84abaee0fe6731ffe2a60b1d93d0526779ee315784277"} Nov 25 09:16:29 crc kubenswrapper[4687]: I1125 09:16:29.777632 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-98kfz" Nov 25 09:16:29 crc kubenswrapper[4687]: I1125 09:16:29.929348 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0a52ad3-45c4-4a3b-874a-6066b601f6c6-catalog-content\") pod \"c0a52ad3-45c4-4a3b-874a-6066b601f6c6\" (UID: \"c0a52ad3-45c4-4a3b-874a-6066b601f6c6\") " Nov 25 09:16:29 crc kubenswrapper[4687]: I1125 09:16:29.929429 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0a52ad3-45c4-4a3b-874a-6066b601f6c6-utilities\") pod \"c0a52ad3-45c4-4a3b-874a-6066b601f6c6\" (UID: \"c0a52ad3-45c4-4a3b-874a-6066b601f6c6\") " Nov 25 09:16:29 crc kubenswrapper[4687]: I1125 09:16:29.929523 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q85g8\" (UniqueName: \"kubernetes.io/projected/c0a52ad3-45c4-4a3b-874a-6066b601f6c6-kube-api-access-q85g8\") pod \"c0a52ad3-45c4-4a3b-874a-6066b601f6c6\" (UID: \"c0a52ad3-45c4-4a3b-874a-6066b601f6c6\") " Nov 25 09:16:29 crc kubenswrapper[4687]: I1125 09:16:29.931012 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0a52ad3-45c4-4a3b-874a-6066b601f6c6-utilities" (OuterVolumeSpecName: "utilities") pod "c0a52ad3-45c4-4a3b-874a-6066b601f6c6" (UID: "c0a52ad3-45c4-4a3b-874a-6066b601f6c6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:16:29 crc kubenswrapper[4687]: I1125 09:16:29.937026 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0a52ad3-45c4-4a3b-874a-6066b601f6c6-kube-api-access-q85g8" (OuterVolumeSpecName: "kube-api-access-q85g8") pod "c0a52ad3-45c4-4a3b-874a-6066b601f6c6" (UID: "c0a52ad3-45c4-4a3b-874a-6066b601f6c6"). InnerVolumeSpecName "kube-api-access-q85g8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:16:30 crc kubenswrapper[4687]: I1125 09:16:30.024544 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0a52ad3-45c4-4a3b-874a-6066b601f6c6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c0a52ad3-45c4-4a3b-874a-6066b601f6c6" (UID: "c0a52ad3-45c4-4a3b-874a-6066b601f6c6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:16:30 crc kubenswrapper[4687]: I1125 09:16:30.030818 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q85g8\" (UniqueName: \"kubernetes.io/projected/c0a52ad3-45c4-4a3b-874a-6066b601f6c6-kube-api-access-q85g8\") on node \"crc\" DevicePath \"\"" Nov 25 09:16:30 crc kubenswrapper[4687]: I1125 09:16:30.030858 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0a52ad3-45c4-4a3b-874a-6066b601f6c6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:16:30 crc kubenswrapper[4687]: I1125 09:16:30.030873 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0a52ad3-45c4-4a3b-874a-6066b601f6c6-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:16:30 crc kubenswrapper[4687]: I1125 09:16:30.365198 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-98kfz" event={"ID":"c0a52ad3-45c4-4a3b-874a-6066b601f6c6","Type":"ContainerDied","Data":"1f98987c783dc5b1d8d700d73233314a8c61b35c8843251f928b7f0211bb50d0"} Nov 25 09:16:30 crc kubenswrapper[4687]: I1125 09:16:30.365274 4687 scope.go:117] "RemoveContainer" containerID="fd973bf967b55a1a3dec41fb1646f62425f3d4971f75001f076ac80e5e666552" Nov 25 09:16:30 crc kubenswrapper[4687]: I1125 09:16:30.365223 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-98kfz" Nov 25 09:16:30 crc kubenswrapper[4687]: I1125 09:16:30.381638 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9p2n5" event={"ID":"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9","Type":"ContainerStarted","Data":"5c62eb373f0317ed681d93ab8f55ffd580ab7d99689486f23e7199a30173b880"} Nov 25 09:16:30 crc kubenswrapper[4687]: I1125 09:16:30.400532 4687 scope.go:117] "RemoveContainer" containerID="151cc06cdd390798ce2a62d8f5af5056ab8dbefc0f9a94ffc3534a7067578977" Nov 25 09:16:30 crc kubenswrapper[4687]: I1125 09:16:30.404444 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-98kfz"] Nov 25 09:16:30 crc kubenswrapper[4687]: I1125 09:16:30.409598 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-98kfz"] Nov 25 09:16:30 crc kubenswrapper[4687]: I1125 09:16:30.416610 4687 scope.go:117] "RemoveContainer" containerID="deb95ac6083933966b8d419c782a000dee0b31cfbbf2d9accbd02b5d0475ed9a" Nov 25 09:16:30 crc kubenswrapper[4687]: I1125 09:16:30.430342 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-9p2n5" podStartSLOduration=2.9507130950000002 podStartE2EDuration="6.430319338s" podCreationTimestamp="2025-11-25 09:16:24 +0000 UTC" firstStartedPulling="2025-11-25 09:16:26.335553536 +0000 UTC m=+781.389193254" lastFinishedPulling="2025-11-25 09:16:29.815159759 +0000 UTC m=+784.868799497" observedRunningTime="2025-11-25 09:16:30.427257374 +0000 UTC m=+785.480897092" watchObservedRunningTime="2025-11-25 09:16:30.430319338 +0000 UTC m=+785.483959056" Nov 25 09:16:31 crc kubenswrapper[4687]: I1125 09:16:31.742369 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0a52ad3-45c4-4a3b-874a-6066b601f6c6" path="/var/lib/kubelet/pods/c0a52ad3-45c4-4a3b-874a-6066b601f6c6/volumes" Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.152950 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-9p2n5" Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.153533 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-9p2n5" Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.200918 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-9p2n5" Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.415593 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-pdpkn"] Nov 25 09:16:35 crc kubenswrapper[4687]: E1125 09:16:35.415809 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0a52ad3-45c4-4a3b-874a-6066b601f6c6" containerName="extract-utilities" Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.415820 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0a52ad3-45c4-4a3b-874a-6066b601f6c6" containerName="extract-utilities" Nov 25 09:16:35 crc kubenswrapper[4687]: E1125 09:16:35.415831 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0a52ad3-45c4-4a3b-874a-6066b601f6c6" containerName="extract-content" Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.415838 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0a52ad3-45c4-4a3b-874a-6066b601f6c6" containerName="extract-content" Nov 25 09:16:35 crc 
kubenswrapper[4687]: E1125 09:16:35.415845 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0a52ad3-45c4-4a3b-874a-6066b601f6c6" containerName="registry-server" Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.415850 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0a52ad3-45c4-4a3b-874a-6066b601f6c6" containerName="registry-server" Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.415936 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0a52ad3-45c4-4a3b-874a-6066b601f6c6" containerName="registry-server" Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.416714 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pdpkn" Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.425201 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pdpkn"] Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.461434 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-9p2n5" Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.506620 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d246a3ac-0f3f-422a-b903-0f4c06148bc7-utilities\") pod \"certified-operators-pdpkn\" (UID: \"d246a3ac-0f3f-422a-b903-0f4c06148bc7\") " pod="openshift-marketplace/certified-operators-pdpkn" Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.506673 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4p8n\" (UniqueName: \"kubernetes.io/projected/d246a3ac-0f3f-422a-b903-0f4c06148bc7-kube-api-access-c4p8n\") pod \"certified-operators-pdpkn\" (UID: \"d246a3ac-0f3f-422a-b903-0f4c06148bc7\") " pod="openshift-marketplace/certified-operators-pdpkn" Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.506757 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d246a3ac-0f3f-422a-b903-0f4c06148bc7-catalog-content\") pod \"certified-operators-pdpkn\" (UID: \"d246a3ac-0f3f-422a-b903-0f4c06148bc7\") " pod="openshift-marketplace/certified-operators-pdpkn" Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.607837 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d246a3ac-0f3f-422a-b903-0f4c06148bc7-catalog-content\") pod \"certified-operators-pdpkn\" (UID: \"d246a3ac-0f3f-422a-b903-0f4c06148bc7\") " pod="openshift-marketplace/certified-operators-pdpkn" Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.608258 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d246a3ac-0f3f-422a-b903-0f4c06148bc7-utilities\") pod \"certified-operators-pdpkn\" (UID: \"d246a3ac-0f3f-422a-b903-0f4c06148bc7\") " pod="openshift-marketplace/certified-operators-pdpkn" Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.608282 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4p8n\" (UniqueName: \"kubernetes.io/projected/d246a3ac-0f3f-422a-b903-0f4c06148bc7-kube-api-access-c4p8n\") pod \"certified-operators-pdpkn\" (UID: \"d246a3ac-0f3f-422a-b903-0f4c06148bc7\") " 
pod="openshift-marketplace/certified-operators-pdpkn" Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.609118 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d246a3ac-0f3f-422a-b903-0f4c06148bc7-catalog-content\") pod \"certified-operators-pdpkn\" (UID: \"d246a3ac-0f3f-422a-b903-0f4c06148bc7\") " pod="openshift-marketplace/certified-operators-pdpkn" Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.609385 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d246a3ac-0f3f-422a-b903-0f4c06148bc7-utilities\") pod \"certified-operators-pdpkn\" (UID: \"d246a3ac-0f3f-422a-b903-0f4c06148bc7\") " pod="openshift-marketplace/certified-operators-pdpkn" Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.635555 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4p8n\" (UniqueName: \"kubernetes.io/projected/d246a3ac-0f3f-422a-b903-0f4c06148bc7-kube-api-access-c4p8n\") pod \"certified-operators-pdpkn\" (UID: \"d246a3ac-0f3f-422a-b903-0f4c06148bc7\") " pod="openshift-marketplace/certified-operators-pdpkn" Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.733485 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pdpkn" Nov 25 09:16:35 crc kubenswrapper[4687]: I1125 09:16:35.965029 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pdpkn"] Nov 25 09:16:36 crc kubenswrapper[4687]: I1125 09:16:36.416023 4687 generic.go:334] "Generic (PLEG): container finished" podID="d246a3ac-0f3f-422a-b903-0f4c06148bc7" containerID="7867c38a993562b83b7fc7b3df0bf4c952a68e2aa5f305bc107f2f90eaf5da06" exitCode=0 Nov 25 09:16:36 crc kubenswrapper[4687]: I1125 09:16:36.416099 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdpkn" event={"ID":"d246a3ac-0f3f-422a-b903-0f4c06148bc7","Type":"ContainerDied","Data":"7867c38a993562b83b7fc7b3df0bf4c952a68e2aa5f305bc107f2f90eaf5da06"} Nov 25 09:16:36 crc kubenswrapper[4687]: I1125 09:16:36.416893 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdpkn" event={"ID":"d246a3ac-0f3f-422a-b903-0f4c06148bc7","Type":"ContainerStarted","Data":"5588cf4f474e30d388c3b717f600ebb216f1b9e0a6f0103f8b3ffa0e670233a3"} Nov 25 09:16:37 crc kubenswrapper[4687]: I1125 09:16:37.426925 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdpkn" event={"ID":"d246a3ac-0f3f-422a-b903-0f4c06148bc7","Type":"ContainerDied","Data":"4d077abd07268ce26a10a92ef539bd911db0a49b30744bd2e28b25241439d7d0"} Nov 25 09:16:37 crc kubenswrapper[4687]: I1125 09:16:37.426874 4687 generic.go:334] "Generic (PLEG): container finished" podID="d246a3ac-0f3f-422a-b903-0f4c06148bc7" containerID="4d077abd07268ce26a10a92ef539bd911db0a49b30744bd2e28b25241439d7d0" exitCode=0 Nov 25 09:16:37 crc kubenswrapper[4687]: I1125 09:16:37.460819 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7"] Nov 25 09:16:37 crc kubenswrapper[4687]: I1125 09:16:37.462158 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7" Nov 25 09:16:37 crc kubenswrapper[4687]: I1125 09:16:37.469768 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7"] Nov 25 09:16:37 crc kubenswrapper[4687]: I1125 09:16:37.471528 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 09:16:37 crc kubenswrapper[4687]: I1125 09:16:37.531559 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/94d372f4-1e99-47c6-89f3-d56aaf08cde8-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7\" (UID: \"94d372f4-1e99-47c6-89f3-d56aaf08cde8\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7" Nov 25 09:16:37 crc kubenswrapper[4687]: I1125 09:16:37.531642 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/94d372f4-1e99-47c6-89f3-d56aaf08cde8-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7\" (UID: \"94d372f4-1e99-47c6-89f3-d56aaf08cde8\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7" Nov 25 09:16:37 crc kubenswrapper[4687]: I1125 09:16:37.531661 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdls5\" (UniqueName: \"kubernetes.io/projected/94d372f4-1e99-47c6-89f3-d56aaf08cde8-kube-api-access-sdls5\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7\" (UID: \"94d372f4-1e99-47c6-89f3-d56aaf08cde8\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7" Nov 25 09:16:37 crc kubenswrapper[4687]: I1125 09:16:37.632697 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/94d372f4-1e99-47c6-89f3-d56aaf08cde8-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7\" (UID: \"94d372f4-1e99-47c6-89f3-d56aaf08cde8\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7" Nov 25 09:16:37 crc kubenswrapper[4687]: I1125 09:16:37.632760 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdls5\" (UniqueName: \"kubernetes.io/projected/94d372f4-1e99-47c6-89f3-d56aaf08cde8-kube-api-access-sdls5\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7\" (UID: \"94d372f4-1e99-47c6-89f3-d56aaf08cde8\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7" Nov 25 09:16:37 crc kubenswrapper[4687]: I1125 09:16:37.632912 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/94d372f4-1e99-47c6-89f3-d56aaf08cde8-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7\" (UID: \"94d372f4-1e99-47c6-89f3-d56aaf08cde8\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7" Nov 25 09:16:37 crc kubenswrapper[4687]: I1125 09:16:37.633212 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/94d372f4-1e99-47c6-89f3-d56aaf08cde8-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7\" (UID: \"94d372f4-1e99-47c6-89f3-d56aaf08cde8\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7" Nov 25 09:16:37 crc kubenswrapper[4687]: I1125 09:16:37.633641 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/94d372f4-1e99-47c6-89f3-d56aaf08cde8-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7\" (UID: \"94d372f4-1e99-47c6-89f3-d56aaf08cde8\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7" Nov 25 09:16:37 crc kubenswrapper[4687]: I1125 09:16:37.654581 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdls5\" (UniqueName: \"kubernetes.io/projected/94d372f4-1e99-47c6-89f3-d56aaf08cde8-kube-api-access-sdls5\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7\" (UID: \"94d372f4-1e99-47c6-89f3-d56aaf08cde8\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7" Nov 25 09:16:37 crc kubenswrapper[4687]: I1125 09:16:37.786192 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7" Nov 25 09:16:38 crc kubenswrapper[4687]: I1125 09:16:38.016914 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7"] Nov 25 09:16:38 crc kubenswrapper[4687]: I1125 09:16:38.435890 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdpkn" event={"ID":"d246a3ac-0f3f-422a-b903-0f4c06148bc7","Type":"ContainerStarted","Data":"39385a063f5e38ab84b63cb948c9eccc30976faf34ca6affc9979046b01d558d"} Nov 25 09:16:38 crc kubenswrapper[4687]: I1125 09:16:38.437670 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7" event={"ID":"94d372f4-1e99-47c6-89f3-d56aaf08cde8","Type":"ContainerStarted","Data":"3e24f6d8134328866e15a35ec3315218bf4fb1adb3dc6494750dd3be38c50c9d"} Nov 25 09:16:38 crc kubenswrapper[4687]: I1125 09:16:38.437737 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7" event={"ID":"94d372f4-1e99-47c6-89f3-d56aaf08cde8","Type":"ContainerStarted","Data":"aa4c66bcc807029af8ead6b56f599b9fdf7a633f86a2cf2db8ec3b0b376b6264"} Nov 25 09:16:38 crc kubenswrapper[4687]: I1125 09:16:38.456925 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-pdpkn" podStartSLOduration=1.820697062 podStartE2EDuration="3.456902943s" podCreationTimestamp="2025-11-25 09:16:35 +0000 UTC" firstStartedPulling="2025-11-25 09:16:36.417491595 +0000 UTC m=+791.471131323" lastFinishedPulling="2025-11-25 09:16:38.053697496 +0000 UTC m=+793.107337204" observedRunningTime="2025-11-25 09:16:38.454265071 +0000 UTC m=+793.507904789" watchObservedRunningTime="2025-11-25 09:16:38.456902943 +0000 UTC m=+793.510542691" Nov 25 09:16:38 crc kubenswrapper[4687]: I1125 09:16:38.809119 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9p2n5"] Nov 25 09:16:38 crc kubenswrapper[4687]: I1125 
09:16:38.809328 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-9p2n5" podUID="75e9a46f-a9b6-4a91-b1c4-7e866446b2c9" containerName="registry-server" containerID="cri-o://5c62eb373f0317ed681d93ab8f55ffd580ab7d99689486f23e7199a30173b880" gracePeriod=2 Nov 25 09:16:40 crc kubenswrapper[4687]: I1125 09:16:40.454867 4687 generic.go:334] "Generic (PLEG): container finished" podID="94d372f4-1e99-47c6-89f3-d56aaf08cde8" containerID="3e24f6d8134328866e15a35ec3315218bf4fb1adb3dc6494750dd3be38c50c9d" exitCode=0 Nov 25 09:16:40 crc kubenswrapper[4687]: I1125 09:16:40.455015 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7" event={"ID":"94d372f4-1e99-47c6-89f3-d56aaf08cde8","Type":"ContainerDied","Data":"3e24f6d8134328866e15a35ec3315218bf4fb1adb3dc6494750dd3be38c50c9d"} Nov 25 09:16:40 crc kubenswrapper[4687]: I1125 09:16:40.460350 4687 generic.go:334] "Generic (PLEG): container finished" podID="75e9a46f-a9b6-4a91-b1c4-7e866446b2c9" containerID="5c62eb373f0317ed681d93ab8f55ffd580ab7d99689486f23e7199a30173b880" exitCode=0 Nov 25 09:16:40 crc kubenswrapper[4687]: I1125 09:16:40.460409 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9p2n5" event={"ID":"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9","Type":"ContainerDied","Data":"5c62eb373f0317ed681d93ab8f55ffd580ab7d99689486f23e7199a30173b880"} Nov 25 09:16:40 crc kubenswrapper[4687]: I1125 09:16:40.637047 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9p2n5" Nov 25 09:16:40 crc kubenswrapper[4687]: I1125 09:16:40.777293 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75e9a46f-a9b6-4a91-b1c4-7e866446b2c9-catalog-content\") pod \"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9\" (UID: \"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9\") " Nov 25 09:16:40 crc kubenswrapper[4687]: I1125 09:16:40.777420 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75e9a46f-a9b6-4a91-b1c4-7e866446b2c9-utilities\") pod \"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9\" (UID: \"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9\") " Nov 25 09:16:40 crc kubenswrapper[4687]: I1125 09:16:40.777465 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zfls9\" (UniqueName: \"kubernetes.io/projected/75e9a46f-a9b6-4a91-b1c4-7e866446b2c9-kube-api-access-zfls9\") pod \"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9\" (UID: \"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9\") " Nov 25 09:16:40 crc kubenswrapper[4687]: I1125 09:16:40.779462 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/75e9a46f-a9b6-4a91-b1c4-7e866446b2c9-utilities" (OuterVolumeSpecName: "utilities") pod "75e9a46f-a9b6-4a91-b1c4-7e866446b2c9" (UID: "75e9a46f-a9b6-4a91-b1c4-7e866446b2c9"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:16:40 crc kubenswrapper[4687]: I1125 09:16:40.786088 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75e9a46f-a9b6-4a91-b1c4-7e866446b2c9-kube-api-access-zfls9" (OuterVolumeSpecName: "kube-api-access-zfls9") pod "75e9a46f-a9b6-4a91-b1c4-7e866446b2c9" (UID: "75e9a46f-a9b6-4a91-b1c4-7e866446b2c9"). InnerVolumeSpecName "kube-api-access-zfls9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:16:40 crc kubenswrapper[4687]: I1125 09:16:40.828953 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/75e9a46f-a9b6-4a91-b1c4-7e866446b2c9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "75e9a46f-a9b6-4a91-b1c4-7e866446b2c9" (UID: "75e9a46f-a9b6-4a91-b1c4-7e866446b2c9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:16:40 crc kubenswrapper[4687]: I1125 09:16:40.879090 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75e9a46f-a9b6-4a91-b1c4-7e866446b2c9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:16:40 crc kubenswrapper[4687]: I1125 09:16:40.879147 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75e9a46f-a9b6-4a91-b1c4-7e866446b2c9-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:16:40 crc kubenswrapper[4687]: I1125 09:16:40.879166 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zfls9\" (UniqueName: \"kubernetes.io/projected/75e9a46f-a9b6-4a91-b1c4-7e866446b2c9-kube-api-access-zfls9\") on node \"crc\" DevicePath \"\"" Nov 25 09:16:41 crc kubenswrapper[4687]: I1125 09:16:41.467994 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9p2n5" event={"ID":"75e9a46f-a9b6-4a91-b1c4-7e866446b2c9","Type":"ContainerDied","Data":"059b7147dfb766b7196486489007d63368913d9f5392e6dac794d998fff16ea4"} Nov 25 09:16:41 crc kubenswrapper[4687]: I1125 09:16:41.468050 4687 scope.go:117] "RemoveContainer" containerID="5c62eb373f0317ed681d93ab8f55ffd580ab7d99689486f23e7199a30173b880" Nov 25 09:16:41 crc kubenswrapper[4687]: I1125 09:16:41.468049 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9p2n5" Nov 25 09:16:41 crc kubenswrapper[4687]: I1125 09:16:41.497987 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9p2n5"] Nov 25 09:16:41 crc kubenswrapper[4687]: I1125 09:16:41.503704 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-9p2n5"] Nov 25 09:16:41 crc kubenswrapper[4687]: I1125 09:16:41.541162 4687 scope.go:117] "RemoveContainer" containerID="894be2b255fa163fcdc84abaee0fe6731ffe2a60b1d93d0526779ee315784277" Nov 25 09:16:41 crc kubenswrapper[4687]: I1125 09:16:41.565934 4687 scope.go:117] "RemoveContainer" containerID="1d67f1cf45cc939af465a3160892e10014fbb551c0517fd882d3bb7920fbb5ce" Nov 25 09:16:41 crc kubenswrapper[4687]: I1125 09:16:41.740532 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75e9a46f-a9b6-4a91-b1c4-7e866446b2c9" path="/var/lib/kubelet/pods/75e9a46f-a9b6-4a91-b1c4-7e866446b2c9/volumes" Nov 25 09:16:42 crc kubenswrapper[4687]: I1125 09:16:42.476977 4687 generic.go:334] "Generic (PLEG): container finished" podID="94d372f4-1e99-47c6-89f3-d56aaf08cde8" containerID="36d477560d5bdd5ddb31dc5ff21cf074f62faff05c4526a3a8b8ac1a2751b16e" exitCode=0 Nov 25 09:16:42 crc kubenswrapper[4687]: I1125 09:16:42.477046 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7" event={"ID":"94d372f4-1e99-47c6-89f3-d56aaf08cde8","Type":"ContainerDied","Data":"36d477560d5bdd5ddb31dc5ff21cf074f62faff05c4526a3a8b8ac1a2751b16e"} Nov 25 09:16:43 crc kubenswrapper[4687]: I1125 09:16:43.490423 4687 generic.go:334] "Generic (PLEG): container finished" podID="94d372f4-1e99-47c6-89f3-d56aaf08cde8" containerID="7b38c43e66224f56ae6cb5451a9a0ff045595d5756fa1baf61bc4e49c6dab2e5" exitCode=0 Nov 25 09:16:43 crc kubenswrapper[4687]: I1125 09:16:43.490460 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7" event={"ID":"94d372f4-1e99-47c6-89f3-d56aaf08cde8","Type":"ContainerDied","Data":"7b38c43e66224f56ae6cb5451a9a0ff045595d5756fa1baf61bc4e49c6dab2e5"} Nov 25 09:16:44 crc kubenswrapper[4687]: I1125 09:16:44.777973 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7" Nov 25 09:16:44 crc kubenswrapper[4687]: I1125 09:16:44.937576 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/94d372f4-1e99-47c6-89f3-d56aaf08cde8-util\") pod \"94d372f4-1e99-47c6-89f3-d56aaf08cde8\" (UID: \"94d372f4-1e99-47c6-89f3-d56aaf08cde8\") " Nov 25 09:16:44 crc kubenswrapper[4687]: I1125 09:16:44.937676 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/94d372f4-1e99-47c6-89f3-d56aaf08cde8-bundle\") pod \"94d372f4-1e99-47c6-89f3-d56aaf08cde8\" (UID: \"94d372f4-1e99-47c6-89f3-d56aaf08cde8\") " Nov 25 09:16:44 crc kubenswrapper[4687]: I1125 09:16:44.937737 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sdls5\" (UniqueName: \"kubernetes.io/projected/94d372f4-1e99-47c6-89f3-d56aaf08cde8-kube-api-access-sdls5\") pod \"94d372f4-1e99-47c6-89f3-d56aaf08cde8\" (UID: \"94d372f4-1e99-47c6-89f3-d56aaf08cde8\") " Nov 25 09:16:44 crc kubenswrapper[4687]: I1125 09:16:44.938401 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94d372f4-1e99-47c6-89f3-d56aaf08cde8-bundle" (OuterVolumeSpecName: "bundle") pod "94d372f4-1e99-47c6-89f3-d56aaf08cde8" (UID: "94d372f4-1e99-47c6-89f3-d56aaf08cde8"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:16:44 crc kubenswrapper[4687]: I1125 09:16:44.942394 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94d372f4-1e99-47c6-89f3-d56aaf08cde8-kube-api-access-sdls5" (OuterVolumeSpecName: "kube-api-access-sdls5") pod "94d372f4-1e99-47c6-89f3-d56aaf08cde8" (UID: "94d372f4-1e99-47c6-89f3-d56aaf08cde8"). InnerVolumeSpecName "kube-api-access-sdls5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:16:44 crc kubenswrapper[4687]: I1125 09:16:44.948635 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94d372f4-1e99-47c6-89f3-d56aaf08cde8-util" (OuterVolumeSpecName: "util") pod "94d372f4-1e99-47c6-89f3-d56aaf08cde8" (UID: "94d372f4-1e99-47c6-89f3-d56aaf08cde8"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:16:45 crc kubenswrapper[4687]: I1125 09:16:45.039009 4687 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/94d372f4-1e99-47c6-89f3-d56aaf08cde8-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:16:45 crc kubenswrapper[4687]: I1125 09:16:45.039317 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sdls5\" (UniqueName: \"kubernetes.io/projected/94d372f4-1e99-47c6-89f3-d56aaf08cde8-kube-api-access-sdls5\") on node \"crc\" DevicePath \"\"" Nov 25 09:16:45 crc kubenswrapper[4687]: I1125 09:16:45.039326 4687 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/94d372f4-1e99-47c6-89f3-d56aaf08cde8-util\") on node \"crc\" DevicePath \"\"" Nov 25 09:16:45 crc kubenswrapper[4687]: I1125 09:16:45.521994 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7" event={"ID":"94d372f4-1e99-47c6-89f3-d56aaf08cde8","Type":"ContainerDied","Data":"aa4c66bcc807029af8ead6b56f599b9fdf7a633f86a2cf2db8ec3b0b376b6264"} Nov 25 09:16:45 crc kubenswrapper[4687]: I1125 09:16:45.522336 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aa4c66bcc807029af8ead6b56f599b9fdf7a633f86a2cf2db8ec3b0b376b6264" Nov 25 09:16:45 crc kubenswrapper[4687]: I1125 09:16:45.522078 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7" Nov 25 09:16:45 crc kubenswrapper[4687]: I1125 09:16:45.739063 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-pdpkn" Nov 25 09:16:45 crc kubenswrapper[4687]: I1125 09:16:45.739167 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-pdpkn" Nov 25 09:16:45 crc kubenswrapper[4687]: I1125 09:16:45.782863 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-pdpkn" Nov 25 09:16:46 crc kubenswrapper[4687]: I1125 09:16:46.591395 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-pdpkn" Nov 25 09:16:48 crc kubenswrapper[4687]: I1125 09:16:48.220125 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-vgd6d"] Nov 25 09:16:48 crc kubenswrapper[4687]: E1125 09:16:48.220789 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94d372f4-1e99-47c6-89f3-d56aaf08cde8" containerName="pull" Nov 25 09:16:48 crc kubenswrapper[4687]: I1125 09:16:48.220805 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="94d372f4-1e99-47c6-89f3-d56aaf08cde8" containerName="pull" Nov 25 09:16:48 crc kubenswrapper[4687]: E1125 09:16:48.220824 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75e9a46f-a9b6-4a91-b1c4-7e866446b2c9" containerName="extract-content" Nov 25 09:16:48 crc kubenswrapper[4687]: I1125 09:16:48.220833 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="75e9a46f-a9b6-4a91-b1c4-7e866446b2c9" containerName="extract-content" Nov 25 09:16:48 crc kubenswrapper[4687]: E1125 09:16:48.220845 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75e9a46f-a9b6-4a91-b1c4-7e866446b2c9" 
containerName="extract-utilities" Nov 25 09:16:48 crc kubenswrapper[4687]: I1125 09:16:48.220853 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="75e9a46f-a9b6-4a91-b1c4-7e866446b2c9" containerName="extract-utilities" Nov 25 09:16:48 crc kubenswrapper[4687]: E1125 09:16:48.220868 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75e9a46f-a9b6-4a91-b1c4-7e866446b2c9" containerName="registry-server" Nov 25 09:16:48 crc kubenswrapper[4687]: I1125 09:16:48.220875 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="75e9a46f-a9b6-4a91-b1c4-7e866446b2c9" containerName="registry-server" Nov 25 09:16:48 crc kubenswrapper[4687]: E1125 09:16:48.220890 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94d372f4-1e99-47c6-89f3-d56aaf08cde8" containerName="util" Nov 25 09:16:48 crc kubenswrapper[4687]: I1125 09:16:48.220898 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="94d372f4-1e99-47c6-89f3-d56aaf08cde8" containerName="util" Nov 25 09:16:48 crc kubenswrapper[4687]: E1125 09:16:48.220910 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94d372f4-1e99-47c6-89f3-d56aaf08cde8" containerName="extract" Nov 25 09:16:48 crc kubenswrapper[4687]: I1125 09:16:48.220917 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="94d372f4-1e99-47c6-89f3-d56aaf08cde8" containerName="extract" Nov 25 09:16:48 crc kubenswrapper[4687]: I1125 09:16:48.221037 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="94d372f4-1e99-47c6-89f3-d56aaf08cde8" containerName="extract" Nov 25 09:16:48 crc kubenswrapper[4687]: I1125 09:16:48.221059 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="75e9a46f-a9b6-4a91-b1c4-7e866446b2c9" containerName="registry-server" Nov 25 09:16:48 crc kubenswrapper[4687]: I1125 09:16:48.221485 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-vgd6d" Nov 25 09:16:48 crc kubenswrapper[4687]: I1125 09:16:48.223640 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 25 09:16:48 crc kubenswrapper[4687]: I1125 09:16:48.223763 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 25 09:16:48 crc kubenswrapper[4687]: I1125 09:16:48.223890 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-bjmb7" Nov 25 09:16:48 crc kubenswrapper[4687]: I1125 09:16:48.231447 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-vgd6d"] Nov 25 09:16:48 crc kubenswrapper[4687]: I1125 09:16:48.381962 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvtmg\" (UniqueName: \"kubernetes.io/projected/4e8ba187-a91b-4312-bf06-eb0c5f0e5bd9-kube-api-access-dvtmg\") pod \"nmstate-operator-557fdffb88-vgd6d\" (UID: \"4e8ba187-a91b-4312-bf06-eb0c5f0e5bd9\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-vgd6d" Nov 25 09:16:48 crc kubenswrapper[4687]: I1125 09:16:48.483467 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvtmg\" (UniqueName: \"kubernetes.io/projected/4e8ba187-a91b-4312-bf06-eb0c5f0e5bd9-kube-api-access-dvtmg\") pod \"nmstate-operator-557fdffb88-vgd6d\" (UID: \"4e8ba187-a91b-4312-bf06-eb0c5f0e5bd9\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-vgd6d" Nov 25 09:16:48 crc kubenswrapper[4687]: I1125 09:16:48.512124 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvtmg\" (UniqueName: \"kubernetes.io/projected/4e8ba187-a91b-4312-bf06-eb0c5f0e5bd9-kube-api-access-dvtmg\") pod \"nmstate-operator-557fdffb88-vgd6d\" (UID: \"4e8ba187-a91b-4312-bf06-eb0c5f0e5bd9\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-vgd6d" Nov 25 09:16:48 crc kubenswrapper[4687]: I1125 09:16:48.538350 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-vgd6d" Nov 25 09:16:48 crc kubenswrapper[4687]: I1125 09:16:48.762733 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-vgd6d"] Nov 25 09:16:48 crc kubenswrapper[4687]: I1125 09:16:48.810289 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pdpkn"] Nov 25 09:16:48 crc kubenswrapper[4687]: I1125 09:16:48.810614 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-pdpkn" podUID="d246a3ac-0f3f-422a-b903-0f4c06148bc7" containerName="registry-server" containerID="cri-o://39385a063f5e38ab84b63cb948c9eccc30976faf34ca6affc9979046b01d558d" gracePeriod=2 Nov 25 09:16:49 crc kubenswrapper[4687]: I1125 09:16:49.554790 4687 generic.go:334] "Generic (PLEG): container finished" podID="d246a3ac-0f3f-422a-b903-0f4c06148bc7" containerID="39385a063f5e38ab84b63cb948c9eccc30976faf34ca6affc9979046b01d558d" exitCode=0 Nov 25 09:16:49 crc kubenswrapper[4687]: I1125 09:16:49.554887 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdpkn" event={"ID":"d246a3ac-0f3f-422a-b903-0f4c06148bc7","Type":"ContainerDied","Data":"39385a063f5e38ab84b63cb948c9eccc30976faf34ca6affc9979046b01d558d"} Nov 25 09:16:49 crc kubenswrapper[4687]: I1125 09:16:49.556405 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-vgd6d" event={"ID":"4e8ba187-a91b-4312-bf06-eb0c5f0e5bd9","Type":"ContainerStarted","Data":"e8434fd42b8c7a565b5061a60da47e94d766cefc41288a39e744cf47c1fd975d"} Nov 25 09:16:49 crc kubenswrapper[4687]: I1125 09:16:49.897411 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pdpkn" Nov 25 09:16:50 crc kubenswrapper[4687]: I1125 09:16:50.021826 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d246a3ac-0f3f-422a-b903-0f4c06148bc7-utilities\") pod \"d246a3ac-0f3f-422a-b903-0f4c06148bc7\" (UID: \"d246a3ac-0f3f-422a-b903-0f4c06148bc7\") " Nov 25 09:16:50 crc kubenswrapper[4687]: I1125 09:16:50.021953 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c4p8n\" (UniqueName: \"kubernetes.io/projected/d246a3ac-0f3f-422a-b903-0f4c06148bc7-kube-api-access-c4p8n\") pod \"d246a3ac-0f3f-422a-b903-0f4c06148bc7\" (UID: \"d246a3ac-0f3f-422a-b903-0f4c06148bc7\") " Nov 25 09:16:50 crc kubenswrapper[4687]: I1125 09:16:50.021975 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d246a3ac-0f3f-422a-b903-0f4c06148bc7-catalog-content\") pod \"d246a3ac-0f3f-422a-b903-0f4c06148bc7\" (UID: \"d246a3ac-0f3f-422a-b903-0f4c06148bc7\") " Nov 25 09:16:50 crc kubenswrapper[4687]: I1125 09:16:50.024666 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d246a3ac-0f3f-422a-b903-0f4c06148bc7-utilities" (OuterVolumeSpecName: "utilities") pod "d246a3ac-0f3f-422a-b903-0f4c06148bc7" (UID: "d246a3ac-0f3f-422a-b903-0f4c06148bc7"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:16:50 crc kubenswrapper[4687]: I1125 09:16:50.027998 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d246a3ac-0f3f-422a-b903-0f4c06148bc7-kube-api-access-c4p8n" (OuterVolumeSpecName: "kube-api-access-c4p8n") pod "d246a3ac-0f3f-422a-b903-0f4c06148bc7" (UID: "d246a3ac-0f3f-422a-b903-0f4c06148bc7"). InnerVolumeSpecName "kube-api-access-c4p8n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:16:50 crc kubenswrapper[4687]: I1125 09:16:50.071814 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d246a3ac-0f3f-422a-b903-0f4c06148bc7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d246a3ac-0f3f-422a-b903-0f4c06148bc7" (UID: "d246a3ac-0f3f-422a-b903-0f4c06148bc7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:16:50 crc kubenswrapper[4687]: I1125 09:16:50.124368 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c4p8n\" (UniqueName: \"kubernetes.io/projected/d246a3ac-0f3f-422a-b903-0f4c06148bc7-kube-api-access-c4p8n\") on node \"crc\" DevicePath \"\"" Nov 25 09:16:50 crc kubenswrapper[4687]: I1125 09:16:50.124768 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d246a3ac-0f3f-422a-b903-0f4c06148bc7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:16:50 crc kubenswrapper[4687]: I1125 09:16:50.124788 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d246a3ac-0f3f-422a-b903-0f4c06148bc7-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:16:50 crc kubenswrapper[4687]: I1125 09:16:50.563608 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdpkn" event={"ID":"d246a3ac-0f3f-422a-b903-0f4c06148bc7","Type":"ContainerDied","Data":"5588cf4f474e30d388c3b717f600ebb216f1b9e0a6f0103f8b3ffa0e670233a3"} Nov 25 09:16:50 crc kubenswrapper[4687]: I1125 09:16:50.563655 4687 scope.go:117] "RemoveContainer" containerID="39385a063f5e38ab84b63cb948c9eccc30976faf34ca6affc9979046b01d558d" Nov 25 09:16:50 crc kubenswrapper[4687]: I1125 09:16:50.563682 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pdpkn" Nov 25 09:16:50 crc kubenswrapper[4687]: I1125 09:16:50.583247 4687 scope.go:117] "RemoveContainer" containerID="4d077abd07268ce26a10a92ef539bd911db0a49b30744bd2e28b25241439d7d0" Nov 25 09:16:50 crc kubenswrapper[4687]: I1125 09:16:50.591978 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pdpkn"] Nov 25 09:16:50 crc kubenswrapper[4687]: I1125 09:16:50.602760 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-pdpkn"] Nov 25 09:16:50 crc kubenswrapper[4687]: I1125 09:16:50.612338 4687 scope.go:117] "RemoveContainer" containerID="7867c38a993562b83b7fc7b3df0bf4c952a68e2aa5f305bc107f2f90eaf5da06" Nov 25 09:16:51 crc kubenswrapper[4687]: I1125 09:16:51.572316 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-vgd6d" event={"ID":"4e8ba187-a91b-4312-bf06-eb0c5f0e5bd9","Type":"ContainerStarted","Data":"64a4b1fbc68f683ee6e310084c9bea7bf337be61a59b67aef6ed0b8af68f5b1e"} Nov 25 09:16:51 crc kubenswrapper[4687]: I1125 09:16:51.595580 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-vgd6d" podStartSLOduration=1.786440245 podStartE2EDuration="3.595560187s" podCreationTimestamp="2025-11-25 09:16:48 +0000 UTC" firstStartedPulling="2025-11-25 09:16:48.775452531 +0000 UTC m=+803.829092249" lastFinishedPulling="2025-11-25 09:16:50.584572473 +0000 UTC m=+805.638212191" observedRunningTime="2025-11-25 09:16:51.593922503 +0000 UTC m=+806.647562261" watchObservedRunningTime="2025-11-25 09:16:51.595560187 +0000 UTC m=+806.649199905" Nov 25 09:16:51 crc kubenswrapper[4687]: I1125 09:16:51.744528 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d246a3ac-0f3f-422a-b903-0f4c06148bc7" path="/var/lib/kubelet/pods/d246a3ac-0f3f-422a-b903-0f4c06148bc7/volumes" Nov 25 09:16:53 crc kubenswrapper[4687]: I1125 09:16:53.845169 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:16:53 crc kubenswrapper[4687]: I1125 09:16:53.845631 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:16:56 crc kubenswrapper[4687]: I1125 09:16:56.969063 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-9n22k"] Nov 25 09:16:56 crc kubenswrapper[4687]: E1125 09:16:56.969633 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d246a3ac-0f3f-422a-b903-0f4c06148bc7" containerName="registry-server" Nov 25 09:16:56 crc kubenswrapper[4687]: I1125 09:16:56.969648 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d246a3ac-0f3f-422a-b903-0f4c06148bc7" containerName="registry-server" Nov 25 09:16:56 crc kubenswrapper[4687]: E1125 09:16:56.969661 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d246a3ac-0f3f-422a-b903-0f4c06148bc7" containerName="extract-content" Nov 25 09:16:56 crc 
kubenswrapper[4687]: I1125 09:16:56.969669 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d246a3ac-0f3f-422a-b903-0f4c06148bc7" containerName="extract-content" Nov 25 09:16:56 crc kubenswrapper[4687]: E1125 09:16:56.969679 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d246a3ac-0f3f-422a-b903-0f4c06148bc7" containerName="extract-utilities" Nov 25 09:16:56 crc kubenswrapper[4687]: I1125 09:16:56.969687 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d246a3ac-0f3f-422a-b903-0f4c06148bc7" containerName="extract-utilities" Nov 25 09:16:56 crc kubenswrapper[4687]: I1125 09:16:56.969800 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="d246a3ac-0f3f-422a-b903-0f4c06148bc7" containerName="registry-server" Nov 25 09:16:56 crc kubenswrapper[4687]: I1125 09:16:56.970371 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-9n22k" Nov 25 09:16:56 crc kubenswrapper[4687]: I1125 09:16:56.976847 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-hxcqz" Nov 25 09:16:56 crc kubenswrapper[4687]: I1125 09:16:56.988048 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-9n22k"] Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.005069 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-p89k9"] Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.005791 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-p89k9" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.007910 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.035273 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-p89k9"] Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.055651 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-dj956"] Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.056493 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-dj956" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.114881 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdzzt\" (UniqueName: \"kubernetes.io/projected/e6f4fd7b-09a3-44f9-9a7d-aaf6f7625989-kube-api-access-pdzzt\") pod \"nmstate-webhook-6b89b748d8-p89k9\" (UID: \"e6f4fd7b-09a3-44f9-9a7d-aaf6f7625989\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-p89k9" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.114964 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/e6f4fd7b-09a3-44f9-9a7d-aaf6f7625989-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-p89k9\" (UID: \"e6f4fd7b-09a3-44f9-9a7d-aaf6f7625989\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-p89k9" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.115031 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgn5l\" (UniqueName: \"kubernetes.io/projected/5e374138-9c9c-41b4-a2d1-eab48197d4bb-kube-api-access-zgn5l\") pod \"nmstate-metrics-5dcf9c57c5-9n22k\" (UID: \"5e374138-9c9c-41b4-a2d1-eab48197d4bb\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-9n22k" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.149084 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-7zd5p"] Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.149933 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-7zd5p" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.151428 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.151597 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-cb2wt" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.151675 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.162420 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-7zd5p"] Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.219225 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/39575ce1-8fae-41c6-8603-a4d49c101e7d-ovs-socket\") pod \"nmstate-handler-dj956\" (UID: \"39575ce1-8fae-41c6-8603-a4d49c101e7d\") " pod="openshift-nmstate/nmstate-handler-dj956" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.219320 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/e6f4fd7b-09a3-44f9-9a7d-aaf6f7625989-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-p89k9\" (UID: \"e6f4fd7b-09a3-44f9-9a7d-aaf6f7625989\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-p89k9" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.219360 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/39575ce1-8fae-41c6-8603-a4d49c101e7d-dbus-socket\") pod \"nmstate-handler-dj956\" (UID: 
\"39575ce1-8fae-41c6-8603-a4d49c101e7d\") " pod="openshift-nmstate/nmstate-handler-dj956" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.219398 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mz24\" (UniqueName: \"kubernetes.io/projected/39575ce1-8fae-41c6-8603-a4d49c101e7d-kube-api-access-7mz24\") pod \"nmstate-handler-dj956\" (UID: \"39575ce1-8fae-41c6-8603-a4d49c101e7d\") " pod="openshift-nmstate/nmstate-handler-dj956" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.219435 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b82qc\" (UniqueName: \"kubernetes.io/projected/5197825b-263a-49cc-abde-f5863cac4989-kube-api-access-b82qc\") pod \"nmstate-console-plugin-5874bd7bc5-7zd5p\" (UID: \"5197825b-263a-49cc-abde-f5863cac4989\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-7zd5p" Nov 25 09:16:57 crc kubenswrapper[4687]: E1125 09:16:57.219456 4687 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.219473 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgn5l\" (UniqueName: \"kubernetes.io/projected/5e374138-9c9c-41b4-a2d1-eab48197d4bb-kube-api-access-zgn5l\") pod \"nmstate-metrics-5dcf9c57c5-9n22k\" (UID: \"5e374138-9c9c-41b4-a2d1-eab48197d4bb\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-9n22k" Nov 25 09:16:57 crc kubenswrapper[4687]: E1125 09:16:57.219551 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e6f4fd7b-09a3-44f9-9a7d-aaf6f7625989-tls-key-pair podName:e6f4fd7b-09a3-44f9-9a7d-aaf6f7625989 nodeName:}" failed. No retries permitted until 2025-11-25 09:16:57.719523805 +0000 UTC m=+812.773163523 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/e6f4fd7b-09a3-44f9-9a7d-aaf6f7625989-tls-key-pair") pod "nmstate-webhook-6b89b748d8-p89k9" (UID: "e6f4fd7b-09a3-44f9-9a7d-aaf6f7625989") : secret "openshift-nmstate-webhook" not found Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.219660 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5197825b-263a-49cc-abde-f5863cac4989-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-7zd5p\" (UID: \"5197825b-263a-49cc-abde-f5863cac4989\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-7zd5p" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.219843 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/39575ce1-8fae-41c6-8603-a4d49c101e7d-nmstate-lock\") pod \"nmstate-handler-dj956\" (UID: \"39575ce1-8fae-41c6-8603-a4d49c101e7d\") " pod="openshift-nmstate/nmstate-handler-dj956" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.219899 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/5197825b-263a-49cc-abde-f5863cac4989-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-7zd5p\" (UID: \"5197825b-263a-49cc-abde-f5863cac4989\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-7zd5p" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.219953 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdzzt\" (UniqueName: \"kubernetes.io/projected/e6f4fd7b-09a3-44f9-9a7d-aaf6f7625989-kube-api-access-pdzzt\") pod \"nmstate-webhook-6b89b748d8-p89k9\" (UID: \"e6f4fd7b-09a3-44f9-9a7d-aaf6f7625989\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-p89k9" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.243731 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdzzt\" (UniqueName: \"kubernetes.io/projected/e6f4fd7b-09a3-44f9-9a7d-aaf6f7625989-kube-api-access-pdzzt\") pod \"nmstate-webhook-6b89b748d8-p89k9\" (UID: \"e6f4fd7b-09a3-44f9-9a7d-aaf6f7625989\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-p89k9" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.243818 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgn5l\" (UniqueName: \"kubernetes.io/projected/5e374138-9c9c-41b4-a2d1-eab48197d4bb-kube-api-access-zgn5l\") pod \"nmstate-metrics-5dcf9c57c5-9n22k\" (UID: \"5e374138-9c9c-41b4-a2d1-eab48197d4bb\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-9n22k" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.295138 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-9n22k" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.322080 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b82qc\" (UniqueName: \"kubernetes.io/projected/5197825b-263a-49cc-abde-f5863cac4989-kube-api-access-b82qc\") pod \"nmstate-console-plugin-5874bd7bc5-7zd5p\" (UID: \"5197825b-263a-49cc-abde-f5863cac4989\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-7zd5p" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.322154 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5197825b-263a-49cc-abde-f5863cac4989-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-7zd5p\" (UID: \"5197825b-263a-49cc-abde-f5863cac4989\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-7zd5p" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.322206 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/39575ce1-8fae-41c6-8603-a4d49c101e7d-nmstate-lock\") pod \"nmstate-handler-dj956\" (UID: \"39575ce1-8fae-41c6-8603-a4d49c101e7d\") " pod="openshift-nmstate/nmstate-handler-dj956" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.322312 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/5197825b-263a-49cc-abde-f5863cac4989-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-7zd5p\" (UID: \"5197825b-263a-49cc-abde-f5863cac4989\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-7zd5p" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.322414 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/39575ce1-8fae-41c6-8603-a4d49c101e7d-ovs-socket\") pod \"nmstate-handler-dj956\" (UID: \"39575ce1-8fae-41c6-8603-a4d49c101e7d\") " pod="openshift-nmstate/nmstate-handler-dj956" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.322467 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/39575ce1-8fae-41c6-8603-a4d49c101e7d-dbus-socket\") pod \"nmstate-handler-dj956\" (UID: \"39575ce1-8fae-41c6-8603-a4d49c101e7d\") " pod="openshift-nmstate/nmstate-handler-dj956" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.322494 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mz24\" (UniqueName: \"kubernetes.io/projected/39575ce1-8fae-41c6-8603-a4d49c101e7d-kube-api-access-7mz24\") pod \"nmstate-handler-dj956\" (UID: \"39575ce1-8fae-41c6-8603-a4d49c101e7d\") " pod="openshift-nmstate/nmstate-handler-dj956" Nov 25 09:16:57 crc kubenswrapper[4687]: E1125 09:16:57.322332 4687 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Nov 25 09:16:57 crc kubenswrapper[4687]: E1125 09:16:57.322978 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5197825b-263a-49cc-abde-f5863cac4989-plugin-serving-cert podName:5197825b-263a-49cc-abde-f5863cac4989 nodeName:}" failed. No retries permitted until 2025-11-25 09:16:57.822955101 +0000 UTC m=+812.876594819 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/5197825b-263a-49cc-abde-f5863cac4989-plugin-serving-cert") pod "nmstate-console-plugin-5874bd7bc5-7zd5p" (UID: "5197825b-263a-49cc-abde-f5863cac4989") : secret "plugin-serving-cert" not found Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.323154 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/39575ce1-8fae-41c6-8603-a4d49c101e7d-ovs-socket\") pod \"nmstate-handler-dj956\" (UID: \"39575ce1-8fae-41c6-8603-a4d49c101e7d\") " pod="openshift-nmstate/nmstate-handler-dj956" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.323379 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5197825b-263a-49cc-abde-f5863cac4989-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-7zd5p\" (UID: \"5197825b-263a-49cc-abde-f5863cac4989\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-7zd5p" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.323430 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/39575ce1-8fae-41c6-8603-a4d49c101e7d-nmstate-lock\") pod \"nmstate-handler-dj956\" (UID: \"39575ce1-8fae-41c6-8603-a4d49c101e7d\") " pod="openshift-nmstate/nmstate-handler-dj956" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.323493 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/39575ce1-8fae-41c6-8603-a4d49c101e7d-dbus-socket\") pod \"nmstate-handler-dj956\" (UID: \"39575ce1-8fae-41c6-8603-a4d49c101e7d\") " pod="openshift-nmstate/nmstate-handler-dj956" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.355628 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b82qc\" (UniqueName: \"kubernetes.io/projected/5197825b-263a-49cc-abde-f5863cac4989-kube-api-access-b82qc\") pod \"nmstate-console-plugin-5874bd7bc5-7zd5p\" (UID: \"5197825b-263a-49cc-abde-f5863cac4989\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-7zd5p" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.364864 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mz24\" (UniqueName: \"kubernetes.io/projected/39575ce1-8fae-41c6-8603-a4d49c101e7d-kube-api-access-7mz24\") pod \"nmstate-handler-dj956\" (UID: \"39575ce1-8fae-41c6-8603-a4d49c101e7d\") " pod="openshift-nmstate/nmstate-handler-dj956" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.367329 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-796f78c94d-w5pr2"] Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.369765 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.382367 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-796f78c94d-w5pr2"] Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.397330 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-dj956" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.427708 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c0d660fc-5a02-42f4-8eb3-38a9865fb8b2-oauth-serving-cert\") pod \"console-796f78c94d-w5pr2\" (UID: \"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2\") " pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.427753 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c0d660fc-5a02-42f4-8eb3-38a9865fb8b2-console-serving-cert\") pod \"console-796f78c94d-w5pr2\" (UID: \"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2\") " pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.427839 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c0d660fc-5a02-42f4-8eb3-38a9865fb8b2-trusted-ca-bundle\") pod \"console-796f78c94d-w5pr2\" (UID: \"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2\") " pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.427861 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c0d660fc-5a02-42f4-8eb3-38a9865fb8b2-service-ca\") pod \"console-796f78c94d-w5pr2\" (UID: \"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2\") " pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.427896 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24gml\" (UniqueName: \"kubernetes.io/projected/c0d660fc-5a02-42f4-8eb3-38a9865fb8b2-kube-api-access-24gml\") pod \"console-796f78c94d-w5pr2\" (UID: \"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2\") " pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.427937 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c0d660fc-5a02-42f4-8eb3-38a9865fb8b2-console-oauth-config\") pod \"console-796f78c94d-w5pr2\" (UID: \"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2\") " pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.427952 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c0d660fc-5a02-42f4-8eb3-38a9865fb8b2-console-config\") pod \"console-796f78c94d-w5pr2\" (UID: \"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2\") " pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.529385 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c0d660fc-5a02-42f4-8eb3-38a9865fb8b2-trusted-ca-bundle\") pod \"console-796f78c94d-w5pr2\" (UID: \"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2\") " pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.529725 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/c0d660fc-5a02-42f4-8eb3-38a9865fb8b2-service-ca\") pod \"console-796f78c94d-w5pr2\" (UID: \"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2\") " pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.529752 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24gml\" (UniqueName: \"kubernetes.io/projected/c0d660fc-5a02-42f4-8eb3-38a9865fb8b2-kube-api-access-24gml\") pod \"console-796f78c94d-w5pr2\" (UID: \"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2\") " pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.529775 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c0d660fc-5a02-42f4-8eb3-38a9865fb8b2-console-oauth-config\") pod \"console-796f78c94d-w5pr2\" (UID: \"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2\") " pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.529791 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c0d660fc-5a02-42f4-8eb3-38a9865fb8b2-console-config\") pod \"console-796f78c94d-w5pr2\" (UID: \"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2\") " pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.529832 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c0d660fc-5a02-42f4-8eb3-38a9865fb8b2-oauth-serving-cert\") pod \"console-796f78c94d-w5pr2\" (UID: \"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2\") " pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.529849 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c0d660fc-5a02-42f4-8eb3-38a9865fb8b2-console-serving-cert\") pod \"console-796f78c94d-w5pr2\" (UID: \"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2\") " pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.531371 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c0d660fc-5a02-42f4-8eb3-38a9865fb8b2-console-config\") pod \"console-796f78c94d-w5pr2\" (UID: \"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2\") " pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.532090 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c0d660fc-5a02-42f4-8eb3-38a9865fb8b2-oauth-serving-cert\") pod \"console-796f78c94d-w5pr2\" (UID: \"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2\") " pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.532353 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c0d660fc-5a02-42f4-8eb3-38a9865fb8b2-service-ca\") pod \"console-796f78c94d-w5pr2\" (UID: \"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2\") " pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.533571 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: 
\"kubernetes.io/secret/c0d660fc-5a02-42f4-8eb3-38a9865fb8b2-console-oauth-config\") pod \"console-796f78c94d-w5pr2\" (UID: \"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2\") " pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.535809 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c0d660fc-5a02-42f4-8eb3-38a9865fb8b2-console-serving-cert\") pod \"console-796f78c94d-w5pr2\" (UID: \"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2\") " pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.537704 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c0d660fc-5a02-42f4-8eb3-38a9865fb8b2-trusted-ca-bundle\") pod \"console-796f78c94d-w5pr2\" (UID: \"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2\") " pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.548267 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-24gml\" (UniqueName: \"kubernetes.io/projected/c0d660fc-5a02-42f4-8eb3-38a9865fb8b2-kube-api-access-24gml\") pod \"console-796f78c94d-w5pr2\" (UID: \"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2\") " pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.606901 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-dj956" event={"ID":"39575ce1-8fae-41c6-8603-a4d49c101e7d","Type":"ContainerStarted","Data":"6dc41babc6c63d392892fa91d48a043ee5bdbafd98008f6ff5c4ede23e412969"} Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.732572 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/e6f4fd7b-09a3-44f9-9a7d-aaf6f7625989-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-p89k9\" (UID: \"e6f4fd7b-09a3-44f9-9a7d-aaf6f7625989\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-p89k9" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.740667 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/e6f4fd7b-09a3-44f9-9a7d-aaf6f7625989-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-p89k9\" (UID: \"e6f4fd7b-09a3-44f9-9a7d-aaf6f7625989\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-p89k9" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.747007 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-796f78c94d-w5pr2" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.788345 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-9n22k"] Nov 25 09:16:57 crc kubenswrapper[4687]: W1125 09:16:57.795411 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5e374138_9c9c_41b4_a2d1_eab48197d4bb.slice/crio-65c8641f6d7493887b9964b86abe749870ba5a966500b6c0b5e1ca62341f9bb8 WatchSource:0}: Error finding container 65c8641f6d7493887b9964b86abe749870ba5a966500b6c0b5e1ca62341f9bb8: Status 404 returned error can't find the container with id 65c8641f6d7493887b9964b86abe749870ba5a966500b6c0b5e1ca62341f9bb8 Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.834656 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/5197825b-263a-49cc-abde-f5863cac4989-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-7zd5p\" (UID: \"5197825b-263a-49cc-abde-f5863cac4989\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-7zd5p" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.838803 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/5197825b-263a-49cc-abde-f5863cac4989-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-7zd5p\" (UID: \"5197825b-263a-49cc-abde-f5863cac4989\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-7zd5p" Nov 25 09:16:57 crc kubenswrapper[4687]: I1125 09:16:57.933900 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-p89k9" Nov 25 09:16:58 crc kubenswrapper[4687]: I1125 09:16:58.066030 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-7zd5p" Nov 25 09:16:58 crc kubenswrapper[4687]: I1125 09:16:58.103983 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-p89k9"] Nov 25 09:16:58 crc kubenswrapper[4687]: W1125 09:16:58.105566 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode6f4fd7b_09a3_44f9_9a7d_aaf6f7625989.slice/crio-a28521c5c2f63a06b37e3916a141115808f7febc0ec77ee2676caeb26aa5fe14 WatchSource:0}: Error finding container a28521c5c2f63a06b37e3916a141115808f7febc0ec77ee2676caeb26aa5fe14: Status 404 returned error can't find the container with id a28521c5c2f63a06b37e3916a141115808f7febc0ec77ee2676caeb26aa5fe14 Nov 25 09:16:58 crc kubenswrapper[4687]: I1125 09:16:58.137936 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-796f78c94d-w5pr2"] Nov 25 09:16:58 crc kubenswrapper[4687]: W1125 09:16:58.149168 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc0d660fc_5a02_42f4_8eb3_38a9865fb8b2.slice/crio-96c8d70003cd43462fcd7f7dc5e2f726088a0066ae0d7299d64960a37fb807b6 WatchSource:0}: Error finding container 96c8d70003cd43462fcd7f7dc5e2f726088a0066ae0d7299d64960a37fb807b6: Status 404 returned error can't find the container with id 96c8d70003cd43462fcd7f7dc5e2f726088a0066ae0d7299d64960a37fb807b6 Nov 25 09:16:58 crc kubenswrapper[4687]: I1125 09:16:58.271436 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-7zd5p"] Nov 25 09:16:58 crc kubenswrapper[4687]: W1125 09:16:58.281094 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5197825b_263a_49cc_abde_f5863cac4989.slice/crio-f308246b2d693e9ca293bc88481d3edf850e2fa76d20c8f22fa4fb79b353639c WatchSource:0}: Error finding container f308246b2d693e9ca293bc88481d3edf850e2fa76d20c8f22fa4fb79b353639c: Status 404 returned error can't find the container with id f308246b2d693e9ca293bc88481d3edf850e2fa76d20c8f22fa4fb79b353639c Nov 25 09:16:58 crc kubenswrapper[4687]: I1125 09:16:58.613411 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-796f78c94d-w5pr2" event={"ID":"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2","Type":"ContainerStarted","Data":"e6afb22823e44cbcc792db05384f7ea68a8d8a5c2c6d7f10fe1b969a84c4847e"} Nov 25 09:16:58 crc kubenswrapper[4687]: I1125 09:16:58.613752 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-796f78c94d-w5pr2" event={"ID":"c0d660fc-5a02-42f4-8eb3-38a9865fb8b2","Type":"ContainerStarted","Data":"96c8d70003cd43462fcd7f7dc5e2f726088a0066ae0d7299d64960a37fb807b6"} Nov 25 09:16:58 crc kubenswrapper[4687]: I1125 09:16:58.615422 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-9n22k" event={"ID":"5e374138-9c9c-41b4-a2d1-eab48197d4bb","Type":"ContainerStarted","Data":"65c8641f6d7493887b9964b86abe749870ba5a966500b6c0b5e1ca62341f9bb8"} Nov 25 09:16:58 crc kubenswrapper[4687]: I1125 09:16:58.616867 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-7zd5p" event={"ID":"5197825b-263a-49cc-abde-f5863cac4989","Type":"ContainerStarted","Data":"f308246b2d693e9ca293bc88481d3edf850e2fa76d20c8f22fa4fb79b353639c"} Nov 25 09:16:58 crc 
Nov 25 09:16:58 crc kubenswrapper[4687]: I1125 09:16:58.632663 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-796f78c94d-w5pr2" podStartSLOduration=1.632638017 podStartE2EDuration="1.632638017s" podCreationTimestamp="2025-11-25 09:16:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:16:58.629595213 +0000 UTC m=+813.683234931" watchObservedRunningTime="2025-11-25 09:16:58.632638017 +0000 UTC m=+813.686277735"
Nov 25 09:17:00 crc kubenswrapper[4687]: I1125 09:17:00.638276 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-7zd5p" event={"ID":"5197825b-263a-49cc-abde-f5863cac4989","Type":"ContainerStarted","Data":"2fec5c39f39e6851ca5517f13ba6a408ef4620808f75174bf413c7e4e7a9e0dd"}
Nov 25 09:17:00 crc kubenswrapper[4687]: I1125 09:17:00.642068 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-p89k9" event={"ID":"e6f4fd7b-09a3-44f9-9a7d-aaf6f7625989","Type":"ContainerStarted","Data":"fa2de12cfc4ef29fdd5a46d9b17c5931f5adc070573e35b24837a1db03e3c089"}
Nov 25 09:17:00 crc kubenswrapper[4687]: I1125 09:17:00.642219 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-p89k9"
Nov 25 09:17:00 crc kubenswrapper[4687]: I1125 09:17:00.644757 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-9n22k" event={"ID":"5e374138-9c9c-41b4-a2d1-eab48197d4bb","Type":"ContainerStarted","Data":"6d09250010f9dd618387fa4f2c1956ec56e52bab15351231fe7d4173e4029142"}
Nov 25 09:17:00 crc kubenswrapper[4687]: I1125 09:17:00.646317 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-dj956" event={"ID":"39575ce1-8fae-41c6-8603-a4d49c101e7d","Type":"ContainerStarted","Data":"2429a0f3fca1d5426783b23b2a30313b6943d6be23f34fbf0b91bccdecc4b131"}
Nov 25 09:17:00 crc kubenswrapper[4687]: I1125 09:17:00.646537 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-dj956"
Nov 25 09:17:00 crc kubenswrapper[4687]: I1125 09:17:00.654791 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-7zd5p" podStartSLOduration=1.5474995520000001 podStartE2EDuration="3.65476378s" podCreationTimestamp="2025-11-25 09:16:57 +0000 UTC" firstStartedPulling="2025-11-25 09:16:58.284748787 +0000 UTC m=+813.338388505" lastFinishedPulling="2025-11-25 09:17:00.392013015 +0000 UTC m=+815.445652733" observedRunningTime="2025-11-25 09:17:00.651817889 +0000 UTC m=+815.705457617" watchObservedRunningTime="2025-11-25 09:17:00.65476378 +0000 UTC m=+815.708403508"
Nov 25 09:17:00 crc kubenswrapper[4687]: I1125 09:17:00.674313 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-dj956" podStartSLOduration=0.791288534 podStartE2EDuration="3.674295785s" podCreationTimestamp="2025-11-25 09:16:57 +0000 UTC" firstStartedPulling="2025-11-25 09:16:57.438936832 +0000 UTC m=+812.492576540" lastFinishedPulling="2025-11-25 09:17:00.321944073 +0000 UTC m=+815.375583791" observedRunningTime="2025-11-25 09:17:00.673842603 +0000 UTC m=+815.727482321" watchObservedRunningTime="2025-11-25 09:17:00.674295785 +0000 UTC m=+815.727935503"
Nov 25 09:17:00 crc kubenswrapper[4687]: I1125 09:17:00.696184 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-p89k9" podStartSLOduration=2.433471565 podStartE2EDuration="4.696165425s" podCreationTimestamp="2025-11-25 09:16:56 +0000 UTC" firstStartedPulling="2025-11-25 09:16:58.107539757 +0000 UTC m=+813.161179475" lastFinishedPulling="2025-11-25 09:17:00.370233597 +0000 UTC m=+815.423873335" observedRunningTime="2025-11-25 09:17:00.692188596 +0000 UTC m=+815.745828324" watchObservedRunningTime="2025-11-25 09:17:00.696165425 +0000 UTC m=+815.749805143"
Nov 25 09:17:03 crc kubenswrapper[4687]: I1125 09:17:03.676286 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-9n22k" event={"ID":"5e374138-9c9c-41b4-a2d1-eab48197d4bb","Type":"ContainerStarted","Data":"f46504975dd07b5230eea389690e39cb3eecb479c33649e2b6f1497eb7f0496f"}
Nov 25 09:17:03 crc kubenswrapper[4687]: I1125 09:17:03.711241 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-9n22k" podStartSLOduration=2.574390659 podStartE2EDuration="7.711204997s" podCreationTimestamp="2025-11-25 09:16:56 +0000 UTC" firstStartedPulling="2025-11-25 09:16:57.79876392 +0000 UTC m=+812.852403638" lastFinishedPulling="2025-11-25 09:17:02.935578248 +0000 UTC m=+817.989217976" observedRunningTime="2025-11-25 09:17:03.708170374 +0000 UTC m=+818.761810122" watchObservedRunningTime="2025-11-25 09:17:03.711204997 +0000 UTC m=+818.764844765"
Nov 25 09:17:07 crc kubenswrapper[4687]: I1125 09:17:07.435866 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-dj956"
Nov 25 09:17:07 crc kubenswrapper[4687]: I1125 09:17:07.747163 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-796f78c94d-w5pr2"
Nov 25 09:17:07 crc kubenswrapper[4687]: I1125 09:17:07.747210 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-796f78c94d-w5pr2"
Nov 25 09:17:07 crc kubenswrapper[4687]: I1125 09:17:07.752090 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-796f78c94d-w5pr2"
Nov 25 09:17:08 crc kubenswrapper[4687]: I1125 09:17:08.757400 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-796f78c94d-w5pr2"
Nov 25 09:17:08 crc kubenswrapper[4687]: I1125 09:17:08.805817 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-zgglt"]
Nov 25 09:17:17 crc kubenswrapper[4687]: I1125 09:17:17.941218 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-p89k9"
Nov 25 09:17:23 crc kubenswrapper[4687]: I1125 09:17:23.845455 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:17:23 crc kubenswrapper[4687]: I1125 09:17:23.846146 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:17:23 crc kubenswrapper[4687]: I1125 09:17:23.846213 4687 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vcqct"
Nov 25 09:17:23 crc kubenswrapper[4687]: I1125 09:17:23.847153 4687 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0b6f3d727e83b272772a62a183625cfadd2675aa7e49dfb9d67bfd134f3394f8"} pod="openshift-machine-config-operator/machine-config-daemon-vcqct" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 09:17:23 crc kubenswrapper[4687]: I1125 09:17:23.847442 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" containerID="cri-o://0b6f3d727e83b272772a62a183625cfadd2675aa7e49dfb9d67bfd134f3394f8" gracePeriod=600
Nov 25 09:17:24 crc kubenswrapper[4687]: I1125 09:17:24.853955 4687 generic.go:334] "Generic (PLEG): container finished" podID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerID="0b6f3d727e83b272772a62a183625cfadd2675aa7e49dfb9d67bfd134f3394f8" exitCode=0
Nov 25 09:17:24 crc kubenswrapper[4687]: I1125 09:17:24.854394 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerDied","Data":"0b6f3d727e83b272772a62a183625cfadd2675aa7e49dfb9d67bfd134f3394f8"}
Nov 25 09:17:24 crc kubenswrapper[4687]: I1125 09:17:24.854420 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerStarted","Data":"775e1f554d9dd2a0b079c1ff7e2f05e88c335de1a345eef583910fe573bfcecf"}
Nov 25 09:17:24 crc kubenswrapper[4687]: I1125 09:17:24.854437 4687 scope.go:117] "RemoveContainer" containerID="b8e887c987f853d9c47a668160efcae36234097479afa1ebd1cdcaf883e881ed"
Nov 25 09:17:31 crc kubenswrapper[4687]: I1125 09:17:31.450907 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl"]
Nov 25 09:17:31 crc kubenswrapper[4687]: I1125 09:17:31.452274 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl"
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl" Nov 25 09:17:31 crc kubenswrapper[4687]: I1125 09:17:31.455707 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 09:17:31 crc kubenswrapper[4687]: I1125 09:17:31.460645 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl"] Nov 25 09:17:31 crc kubenswrapper[4687]: I1125 09:17:31.640558 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3775e1e0-7599-47fa-b6af-872dff20eb0a-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl\" (UID: \"3775e1e0-7599-47fa-b6af-872dff20eb0a\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl" Nov 25 09:17:31 crc kubenswrapper[4687]: I1125 09:17:31.640622 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4nrr\" (UniqueName: \"kubernetes.io/projected/3775e1e0-7599-47fa-b6af-872dff20eb0a-kube-api-access-g4nrr\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl\" (UID: \"3775e1e0-7599-47fa-b6af-872dff20eb0a\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl" Nov 25 09:17:31 crc kubenswrapper[4687]: I1125 09:17:31.640678 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3775e1e0-7599-47fa-b6af-872dff20eb0a-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl\" (UID: \"3775e1e0-7599-47fa-b6af-872dff20eb0a\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl" Nov 25 09:17:31 crc kubenswrapper[4687]: I1125 09:17:31.741836 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3775e1e0-7599-47fa-b6af-872dff20eb0a-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl\" (UID: \"3775e1e0-7599-47fa-b6af-872dff20eb0a\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl" Nov 25 09:17:31 crc kubenswrapper[4687]: I1125 09:17:31.741970 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3775e1e0-7599-47fa-b6af-872dff20eb0a-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl\" (UID: \"3775e1e0-7599-47fa-b6af-872dff20eb0a\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl" Nov 25 09:17:31 crc kubenswrapper[4687]: I1125 09:17:31.742017 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4nrr\" (UniqueName: \"kubernetes.io/projected/3775e1e0-7599-47fa-b6af-872dff20eb0a-kube-api-access-g4nrr\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl\" (UID: \"3775e1e0-7599-47fa-b6af-872dff20eb0a\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl" Nov 25 09:17:31 crc kubenswrapper[4687]: I1125 09:17:31.742677 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/3775e1e0-7599-47fa-b6af-872dff20eb0a-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl\" (UID: \"3775e1e0-7599-47fa-b6af-872dff20eb0a\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl" Nov 25 09:17:31 crc kubenswrapper[4687]: I1125 09:17:31.742752 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3775e1e0-7599-47fa-b6af-872dff20eb0a-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl\" (UID: \"3775e1e0-7599-47fa-b6af-872dff20eb0a\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl" Nov 25 09:17:31 crc kubenswrapper[4687]: I1125 09:17:31.773838 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4nrr\" (UniqueName: \"kubernetes.io/projected/3775e1e0-7599-47fa-b6af-872dff20eb0a-kube-api-access-g4nrr\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl\" (UID: \"3775e1e0-7599-47fa-b6af-872dff20eb0a\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl" Nov 25 09:17:32 crc kubenswrapper[4687]: I1125 09:17:32.066544 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl" Nov 25 09:17:32 crc kubenswrapper[4687]: I1125 09:17:32.490750 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl"] Nov 25 09:17:32 crc kubenswrapper[4687]: W1125 09:17:32.498940 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3775e1e0_7599_47fa_b6af_872dff20eb0a.slice/crio-586d736864cdaa9ab961294b956ef6e469cff5898aee0a01b9b08a864f06c7e3 WatchSource:0}: Error finding container 586d736864cdaa9ab961294b956ef6e469cff5898aee0a01b9b08a864f06c7e3: Status 404 returned error can't find the container with id 586d736864cdaa9ab961294b956ef6e469cff5898aee0a01b9b08a864f06c7e3 Nov 25 09:17:32 crc kubenswrapper[4687]: I1125 09:17:32.920429 4687 generic.go:334] "Generic (PLEG): container finished" podID="3775e1e0-7599-47fa-b6af-872dff20eb0a" containerID="2f71b0a1475499dba5d25271a6288b9662372d0c02fae2d27455e5ff70bfb206" exitCode=0 Nov 25 09:17:32 crc kubenswrapper[4687]: I1125 09:17:32.920488 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl" event={"ID":"3775e1e0-7599-47fa-b6af-872dff20eb0a","Type":"ContainerDied","Data":"2f71b0a1475499dba5d25271a6288b9662372d0c02fae2d27455e5ff70bfb206"} Nov 25 09:17:32 crc kubenswrapper[4687]: I1125 09:17:32.920997 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl" event={"ID":"3775e1e0-7599-47fa-b6af-872dff20eb0a","Type":"ContainerStarted","Data":"586d736864cdaa9ab961294b956ef6e469cff5898aee0a01b9b08a864f06c7e3"} Nov 25 09:17:33 crc kubenswrapper[4687]: I1125 09:17:33.843713 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-zgglt" podUID="fbe26cf0-9829-4e16-b4c6-24484b1e678a" containerName="console" containerID="cri-o://5b9dd483812d8925685db1cf75821344c58502eb1d554335a02f4a2da1e14ae1" gracePeriod=15 Nov 25 09:17:34 crc 
kubenswrapper[4687]: I1125 09:17:34.192325 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-zgglt_fbe26cf0-9829-4e16-b4c6-24484b1e678a/console/0.log" Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.192623 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.382813 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-console-config\") pod \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.383240 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-trusted-ca-bundle\") pod \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.383272 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fbe26cf0-9829-4e16-b4c6-24484b1e678a-console-oauth-config\") pod \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.383335 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fbe26cf0-9829-4e16-b4c6-24484b1e678a-console-serving-cert\") pod \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.383407 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cwpbx\" (UniqueName: \"kubernetes.io/projected/fbe26cf0-9829-4e16-b4c6-24484b1e678a-kube-api-access-cwpbx\") pod \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.383434 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-oauth-serving-cert\") pod \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.383471 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-service-ca\") pod \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\" (UID: \"fbe26cf0-9829-4e16-b4c6-24484b1e678a\") " Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.384574 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "fbe26cf0-9829-4e16-b4c6-24484b1e678a" (UID: "fbe26cf0-9829-4e16-b4c6-24484b1e678a"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.384756 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "fbe26cf0-9829-4e16-b4c6-24484b1e678a" (UID: "fbe26cf0-9829-4e16-b4c6-24484b1e678a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.384976 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-console-config" (OuterVolumeSpecName: "console-config") pod "fbe26cf0-9829-4e16-b4c6-24484b1e678a" (UID: "fbe26cf0-9829-4e16-b4c6-24484b1e678a"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.385122 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-service-ca" (OuterVolumeSpecName: "service-ca") pod "fbe26cf0-9829-4e16-b4c6-24484b1e678a" (UID: "fbe26cf0-9829-4e16-b4c6-24484b1e678a"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.390166 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbe26cf0-9829-4e16-b4c6-24484b1e678a-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "fbe26cf0-9829-4e16-b4c6-24484b1e678a" (UID: "fbe26cf0-9829-4e16-b4c6-24484b1e678a"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.391087 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbe26cf0-9829-4e16-b4c6-24484b1e678a-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "fbe26cf0-9829-4e16-b4c6-24484b1e678a" (UID: "fbe26cf0-9829-4e16-b4c6-24484b1e678a"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.391288 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fbe26cf0-9829-4e16-b4c6-24484b1e678a-kube-api-access-cwpbx" (OuterVolumeSpecName: "kube-api-access-cwpbx") pod "fbe26cf0-9829-4e16-b4c6-24484b1e678a" (UID: "fbe26cf0-9829-4e16-b4c6-24484b1e678a"). InnerVolumeSpecName "kube-api-access-cwpbx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.485587 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cwpbx\" (UniqueName: \"kubernetes.io/projected/fbe26cf0-9829-4e16-b4c6-24484b1e678a-kube-api-access-cwpbx\") on node \"crc\" DevicePath \"\"" Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.485862 4687 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.485929 4687 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.485994 4687 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.486046 4687 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fbe26cf0-9829-4e16-b4c6-24484b1e678a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.486096 4687 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fbe26cf0-9829-4e16-b4c6-24484b1e678a-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.486157 4687 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fbe26cf0-9829-4e16-b4c6-24484b1e678a-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.938065 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-zgglt_fbe26cf0-9829-4e16-b4c6-24484b1e678a/console/0.log" Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.938163 4687 generic.go:334] "Generic (PLEG): container finished" podID="fbe26cf0-9829-4e16-b4c6-24484b1e678a" containerID="5b9dd483812d8925685db1cf75821344c58502eb1d554335a02f4a2da1e14ae1" exitCode=2 Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.938217 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-zgglt" Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.938284 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-zgglt" event={"ID":"fbe26cf0-9829-4e16-b4c6-24484b1e678a","Type":"ContainerDied","Data":"5b9dd483812d8925685db1cf75821344c58502eb1d554335a02f4a2da1e14ae1"} Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.938338 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-zgglt" event={"ID":"fbe26cf0-9829-4e16-b4c6-24484b1e678a","Type":"ContainerDied","Data":"d907ee286e1eb1e87a0a2cc250d13a7d423eb6b7c88736d63e8cbd20460c236f"} Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.938399 4687 scope.go:117] "RemoveContainer" containerID="5b9dd483812d8925685db1cf75821344c58502eb1d554335a02f4a2da1e14ae1" Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.942141 4687 generic.go:334] "Generic (PLEG): container finished" podID="3775e1e0-7599-47fa-b6af-872dff20eb0a" containerID="6ab073d6fcf453488590073163d6afdea42aa28b4cb6433a5eba7fe7f765a371" exitCode=0 Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.942219 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl" event={"ID":"3775e1e0-7599-47fa-b6af-872dff20eb0a","Type":"ContainerDied","Data":"6ab073d6fcf453488590073163d6afdea42aa28b4cb6433a5eba7fe7f765a371"} Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.976813 4687 scope.go:117] "RemoveContainer" containerID="5b9dd483812d8925685db1cf75821344c58502eb1d554335a02f4a2da1e14ae1" Nov 25 09:17:34 crc kubenswrapper[4687]: E1125 09:17:34.978565 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b9dd483812d8925685db1cf75821344c58502eb1d554335a02f4a2da1e14ae1\": container with ID starting with 5b9dd483812d8925685db1cf75821344c58502eb1d554335a02f4a2da1e14ae1 not found: ID does not exist" containerID="5b9dd483812d8925685db1cf75821344c58502eb1d554335a02f4a2da1e14ae1" Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.978597 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b9dd483812d8925685db1cf75821344c58502eb1d554335a02f4a2da1e14ae1"} err="failed to get container status \"5b9dd483812d8925685db1cf75821344c58502eb1d554335a02f4a2da1e14ae1\": rpc error: code = NotFound desc = could not find container \"5b9dd483812d8925685db1cf75821344c58502eb1d554335a02f4a2da1e14ae1\": container with ID starting with 5b9dd483812d8925685db1cf75821344c58502eb1d554335a02f4a2da1e14ae1 not found: ID does not exist" Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.988025 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-zgglt"] Nov 25 09:17:34 crc kubenswrapper[4687]: I1125 09:17:34.993176 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-zgglt"] Nov 25 09:17:35 crc kubenswrapper[4687]: I1125 09:17:35.746337 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fbe26cf0-9829-4e16-b4c6-24484b1e678a" path="/var/lib/kubelet/pods/fbe26cf0-9829-4e16-b4c6-24484b1e678a/volumes" Nov 25 09:17:35 crc kubenswrapper[4687]: I1125 09:17:35.949710 4687 generic.go:334] "Generic (PLEG): container finished" podID="3775e1e0-7599-47fa-b6af-872dff20eb0a" 
containerID="55e5d9984f4270f2d42370e6f4a4b7980b096d6bd9be43de5ae15d938de123a4" exitCode=0 Nov 25 09:17:35 crc kubenswrapper[4687]: I1125 09:17:35.949864 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl" event={"ID":"3775e1e0-7599-47fa-b6af-872dff20eb0a","Type":"ContainerDied","Data":"55e5d9984f4270f2d42370e6f4a4b7980b096d6bd9be43de5ae15d938de123a4"} Nov 25 09:17:37 crc kubenswrapper[4687]: I1125 09:17:37.248712 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl" Nov 25 09:17:37 crc kubenswrapper[4687]: I1125 09:17:37.327651 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3775e1e0-7599-47fa-b6af-872dff20eb0a-util\") pod \"3775e1e0-7599-47fa-b6af-872dff20eb0a\" (UID: \"3775e1e0-7599-47fa-b6af-872dff20eb0a\") " Nov 25 09:17:37 crc kubenswrapper[4687]: I1125 09:17:37.327820 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g4nrr\" (UniqueName: \"kubernetes.io/projected/3775e1e0-7599-47fa-b6af-872dff20eb0a-kube-api-access-g4nrr\") pod \"3775e1e0-7599-47fa-b6af-872dff20eb0a\" (UID: \"3775e1e0-7599-47fa-b6af-872dff20eb0a\") " Nov 25 09:17:37 crc kubenswrapper[4687]: I1125 09:17:37.328608 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3775e1e0-7599-47fa-b6af-872dff20eb0a-bundle\") pod \"3775e1e0-7599-47fa-b6af-872dff20eb0a\" (UID: \"3775e1e0-7599-47fa-b6af-872dff20eb0a\") " Nov 25 09:17:37 crc kubenswrapper[4687]: I1125 09:17:37.329545 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3775e1e0-7599-47fa-b6af-872dff20eb0a-bundle" (OuterVolumeSpecName: "bundle") pod "3775e1e0-7599-47fa-b6af-872dff20eb0a" (UID: "3775e1e0-7599-47fa-b6af-872dff20eb0a"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:17:37 crc kubenswrapper[4687]: I1125 09:17:37.332947 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3775e1e0-7599-47fa-b6af-872dff20eb0a-kube-api-access-g4nrr" (OuterVolumeSpecName: "kube-api-access-g4nrr") pod "3775e1e0-7599-47fa-b6af-872dff20eb0a" (UID: "3775e1e0-7599-47fa-b6af-872dff20eb0a"). InnerVolumeSpecName "kube-api-access-g4nrr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:17:37 crc kubenswrapper[4687]: I1125 09:17:37.344109 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3775e1e0-7599-47fa-b6af-872dff20eb0a-util" (OuterVolumeSpecName: "util") pod "3775e1e0-7599-47fa-b6af-872dff20eb0a" (UID: "3775e1e0-7599-47fa-b6af-872dff20eb0a"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:17:37 crc kubenswrapper[4687]: I1125 09:17:37.429549 4687 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3775e1e0-7599-47fa-b6af-872dff20eb0a-util\") on node \"crc\" DevicePath \"\"" Nov 25 09:17:37 crc kubenswrapper[4687]: I1125 09:17:37.429595 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g4nrr\" (UniqueName: \"kubernetes.io/projected/3775e1e0-7599-47fa-b6af-872dff20eb0a-kube-api-access-g4nrr\") on node \"crc\" DevicePath \"\"" Nov 25 09:17:37 crc kubenswrapper[4687]: I1125 09:17:37.429615 4687 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3775e1e0-7599-47fa-b6af-872dff20eb0a-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:17:37 crc kubenswrapper[4687]: I1125 09:17:37.968059 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl" event={"ID":"3775e1e0-7599-47fa-b6af-872dff20eb0a","Type":"ContainerDied","Data":"586d736864cdaa9ab961294b956ef6e469cff5898aee0a01b9b08a864f06c7e3"} Nov 25 09:17:37 crc kubenswrapper[4687]: I1125 09:17:37.968099 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="586d736864cdaa9ab961294b956ef6e469cff5898aee0a01b9b08a864f06c7e3" Nov 25 09:17:37 crc kubenswrapper[4687]: I1125 09:17:37.968167 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.081908 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-558db5dd86-fc58n"] Nov 25 09:17:49 crc kubenswrapper[4687]: E1125 09:17:49.082735 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3775e1e0-7599-47fa-b6af-872dff20eb0a" containerName="extract" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.082751 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="3775e1e0-7599-47fa-b6af-872dff20eb0a" containerName="extract" Nov 25 09:17:49 crc kubenswrapper[4687]: E1125 09:17:49.082762 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbe26cf0-9829-4e16-b4c6-24484b1e678a" containerName="console" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.082770 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbe26cf0-9829-4e16-b4c6-24484b1e678a" containerName="console" Nov 25 09:17:49 crc kubenswrapper[4687]: E1125 09:17:49.082794 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3775e1e0-7599-47fa-b6af-872dff20eb0a" containerName="pull" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.082802 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="3775e1e0-7599-47fa-b6af-872dff20eb0a" containerName="pull" Nov 25 09:17:49 crc kubenswrapper[4687]: E1125 09:17:49.082813 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3775e1e0-7599-47fa-b6af-872dff20eb0a" containerName="util" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.082820 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="3775e1e0-7599-47fa-b6af-872dff20eb0a" containerName="util" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.082955 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="fbe26cf0-9829-4e16-b4c6-24484b1e678a" containerName="console" Nov 
25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.082967 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="3775e1e0-7599-47fa-b6af-872dff20eb0a" containerName="extract" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.083419 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-558db5dd86-fc58n" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.085249 4687 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.085249 4687 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.085751 4687 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-8l7d9" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.088793 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.088791 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.098846 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-558db5dd86-fc58n"] Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.172970 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/66122fe0-e231-48e6-8051-a04d330d8f17-webhook-cert\") pod \"metallb-operator-controller-manager-558db5dd86-fc58n\" (UID: \"66122fe0-e231-48e6-8051-a04d330d8f17\") " pod="metallb-system/metallb-operator-controller-manager-558db5dd86-fc58n" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.173129 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/66122fe0-e231-48e6-8051-a04d330d8f17-apiservice-cert\") pod \"metallb-operator-controller-manager-558db5dd86-fc58n\" (UID: \"66122fe0-e231-48e6-8051-a04d330d8f17\") " pod="metallb-system/metallb-operator-controller-manager-558db5dd86-fc58n" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.173188 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59l4v\" (UniqueName: \"kubernetes.io/projected/66122fe0-e231-48e6-8051-a04d330d8f17-kube-api-access-59l4v\") pod \"metallb-operator-controller-manager-558db5dd86-fc58n\" (UID: \"66122fe0-e231-48e6-8051-a04d330d8f17\") " pod="metallb-system/metallb-operator-controller-manager-558db5dd86-fc58n" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.273861 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/66122fe0-e231-48e6-8051-a04d330d8f17-apiservice-cert\") pod \"metallb-operator-controller-manager-558db5dd86-fc58n\" (UID: \"66122fe0-e231-48e6-8051-a04d330d8f17\") " pod="metallb-system/metallb-operator-controller-manager-558db5dd86-fc58n" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.273909 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59l4v\" (UniqueName: 
\"kubernetes.io/projected/66122fe0-e231-48e6-8051-a04d330d8f17-kube-api-access-59l4v\") pod \"metallb-operator-controller-manager-558db5dd86-fc58n\" (UID: \"66122fe0-e231-48e6-8051-a04d330d8f17\") " pod="metallb-system/metallb-operator-controller-manager-558db5dd86-fc58n" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.273943 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/66122fe0-e231-48e6-8051-a04d330d8f17-webhook-cert\") pod \"metallb-operator-controller-manager-558db5dd86-fc58n\" (UID: \"66122fe0-e231-48e6-8051-a04d330d8f17\") " pod="metallb-system/metallb-operator-controller-manager-558db5dd86-fc58n" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.280297 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/66122fe0-e231-48e6-8051-a04d330d8f17-apiservice-cert\") pod \"metallb-operator-controller-manager-558db5dd86-fc58n\" (UID: \"66122fe0-e231-48e6-8051-a04d330d8f17\") " pod="metallb-system/metallb-operator-controller-manager-558db5dd86-fc58n" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.286392 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/66122fe0-e231-48e6-8051-a04d330d8f17-webhook-cert\") pod \"metallb-operator-controller-manager-558db5dd86-fc58n\" (UID: \"66122fe0-e231-48e6-8051-a04d330d8f17\") " pod="metallb-system/metallb-operator-controller-manager-558db5dd86-fc58n" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.290479 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-59l4v\" (UniqueName: \"kubernetes.io/projected/66122fe0-e231-48e6-8051-a04d330d8f17-kube-api-access-59l4v\") pod \"metallb-operator-controller-manager-558db5dd86-fc58n\" (UID: \"66122fe0-e231-48e6-8051-a04d330d8f17\") " pod="metallb-system/metallb-operator-controller-manager-558db5dd86-fc58n" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.400211 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-558db5dd86-fc58n" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.433644 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-84db77dcc8-bjgl5"] Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.434874 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-84db77dcc8-bjgl5" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.437925 4687 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.438299 4687 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-rwn27" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.438478 4687 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.446201 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-84db77dcc8-bjgl5"] Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.475616 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8ed9d933-29d2-4e13-bb9b-377cdc8cf10a-apiservice-cert\") pod \"metallb-operator-webhook-server-84db77dcc8-bjgl5\" (UID: \"8ed9d933-29d2-4e13-bb9b-377cdc8cf10a\") " pod="metallb-system/metallb-operator-webhook-server-84db77dcc8-bjgl5" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.475661 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8ed9d933-29d2-4e13-bb9b-377cdc8cf10a-webhook-cert\") pod \"metallb-operator-webhook-server-84db77dcc8-bjgl5\" (UID: \"8ed9d933-29d2-4e13-bb9b-377cdc8cf10a\") " pod="metallb-system/metallb-operator-webhook-server-84db77dcc8-bjgl5" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.475681 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xjhv9\" (UniqueName: \"kubernetes.io/projected/8ed9d933-29d2-4e13-bb9b-377cdc8cf10a-kube-api-access-xjhv9\") pod \"metallb-operator-webhook-server-84db77dcc8-bjgl5\" (UID: \"8ed9d933-29d2-4e13-bb9b-377cdc8cf10a\") " pod="metallb-system/metallb-operator-webhook-server-84db77dcc8-bjgl5" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.577887 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8ed9d933-29d2-4e13-bb9b-377cdc8cf10a-apiservice-cert\") pod \"metallb-operator-webhook-server-84db77dcc8-bjgl5\" (UID: \"8ed9d933-29d2-4e13-bb9b-377cdc8cf10a\") " pod="metallb-system/metallb-operator-webhook-server-84db77dcc8-bjgl5" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.577928 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8ed9d933-29d2-4e13-bb9b-377cdc8cf10a-webhook-cert\") pod \"metallb-operator-webhook-server-84db77dcc8-bjgl5\" (UID: \"8ed9d933-29d2-4e13-bb9b-377cdc8cf10a\") " pod="metallb-system/metallb-operator-webhook-server-84db77dcc8-bjgl5" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.577964 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xjhv9\" (UniqueName: \"kubernetes.io/projected/8ed9d933-29d2-4e13-bb9b-377cdc8cf10a-kube-api-access-xjhv9\") pod \"metallb-operator-webhook-server-84db77dcc8-bjgl5\" (UID: \"8ed9d933-29d2-4e13-bb9b-377cdc8cf10a\") " pod="metallb-system/metallb-operator-webhook-server-84db77dcc8-bjgl5" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 
09:17:49.591525 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8ed9d933-29d2-4e13-bb9b-377cdc8cf10a-apiservice-cert\") pod \"metallb-operator-webhook-server-84db77dcc8-bjgl5\" (UID: \"8ed9d933-29d2-4e13-bb9b-377cdc8cf10a\") " pod="metallb-system/metallb-operator-webhook-server-84db77dcc8-bjgl5" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.601093 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xjhv9\" (UniqueName: \"kubernetes.io/projected/8ed9d933-29d2-4e13-bb9b-377cdc8cf10a-kube-api-access-xjhv9\") pod \"metallb-operator-webhook-server-84db77dcc8-bjgl5\" (UID: \"8ed9d933-29d2-4e13-bb9b-377cdc8cf10a\") " pod="metallb-system/metallb-operator-webhook-server-84db77dcc8-bjgl5" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.618952 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8ed9d933-29d2-4e13-bb9b-377cdc8cf10a-webhook-cert\") pod \"metallb-operator-webhook-server-84db77dcc8-bjgl5\" (UID: \"8ed9d933-29d2-4e13-bb9b-377cdc8cf10a\") " pod="metallb-system/metallb-operator-webhook-server-84db77dcc8-bjgl5" Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.686495 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-558db5dd86-fc58n"] Nov 25 09:17:49 crc kubenswrapper[4687]: W1125 09:17:49.690853 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod66122fe0_e231_48e6_8051_a04d330d8f17.slice/crio-d16728a1d06c53a9e175e1b1fc4bf81a4f627a3f6dc7265ec978df740e20f0fa WatchSource:0}: Error finding container d16728a1d06c53a9e175e1b1fc4bf81a4f627a3f6dc7265ec978df740e20f0fa: Status 404 returned error can't find the container with id d16728a1d06c53a9e175e1b1fc4bf81a4f627a3f6dc7265ec978df740e20f0fa Nov 25 09:17:49 crc kubenswrapper[4687]: I1125 09:17:49.772235 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-84db77dcc8-bjgl5" Nov 25 09:17:50 crc kubenswrapper[4687]: I1125 09:17:50.037776 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-558db5dd86-fc58n" event={"ID":"66122fe0-e231-48e6-8051-a04d330d8f17","Type":"ContainerStarted","Data":"d16728a1d06c53a9e175e1b1fc4bf81a4f627a3f6dc7265ec978df740e20f0fa"} Nov 25 09:17:50 crc kubenswrapper[4687]: I1125 09:17:50.197769 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-84db77dcc8-bjgl5"] Nov 25 09:17:50 crc kubenswrapper[4687]: W1125 09:17:50.202705 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8ed9d933_29d2_4e13_bb9b_377cdc8cf10a.slice/crio-4d814b047aedf18b46c78fe406ee04c86d064eb52de93f72233edcb4152a8461 WatchSource:0}: Error finding container 4d814b047aedf18b46c78fe406ee04c86d064eb52de93f72233edcb4152a8461: Status 404 returned error can't find the container with id 4d814b047aedf18b46c78fe406ee04c86d064eb52de93f72233edcb4152a8461 Nov 25 09:17:51 crc kubenswrapper[4687]: I1125 09:17:51.045221 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-84db77dcc8-bjgl5" event={"ID":"8ed9d933-29d2-4e13-bb9b-377cdc8cf10a","Type":"ContainerStarted","Data":"4d814b047aedf18b46c78fe406ee04c86d064eb52de93f72233edcb4152a8461"} Nov 25 09:17:53 crc kubenswrapper[4687]: I1125 09:17:53.067372 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-558db5dd86-fc58n" event={"ID":"66122fe0-e231-48e6-8051-a04d330d8f17","Type":"ContainerStarted","Data":"52d74dbe405bdea78336c338508aa99dfa0f806804df4f707ffcd1c4f93b8440"} Nov 25 09:17:53 crc kubenswrapper[4687]: I1125 09:17:53.068109 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-558db5dd86-fc58n" Nov 25 09:17:53 crc kubenswrapper[4687]: I1125 09:17:53.098148 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-558db5dd86-fc58n" podStartSLOduration=0.995493172 podStartE2EDuration="4.098130214s" podCreationTimestamp="2025-11-25 09:17:49 +0000 UTC" firstStartedPulling="2025-11-25 09:17:49.693098284 +0000 UTC m=+864.746737992" lastFinishedPulling="2025-11-25 09:17:52.795735316 +0000 UTC m=+867.849375034" observedRunningTime="2025-11-25 09:17:53.097117606 +0000 UTC m=+868.150757324" watchObservedRunningTime="2025-11-25 09:17:53.098130214 +0000 UTC m=+868.151769932" Nov 25 09:17:55 crc kubenswrapper[4687]: I1125 09:17:55.080219 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-84db77dcc8-bjgl5" event={"ID":"8ed9d933-29d2-4e13-bb9b-377cdc8cf10a","Type":"ContainerStarted","Data":"a8cbb4551c600cc327e3912259bffef3f908d862e92576148248528a817aaf16"} Nov 25 09:17:56 crc kubenswrapper[4687]: I1125 09:17:56.084992 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-84db77dcc8-bjgl5" Nov 25 09:17:56 crc kubenswrapper[4687]: I1125 09:17:56.101306 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-84db77dcc8-bjgl5" podStartSLOduration=2.449872346 podStartE2EDuration="7.101284579s" 
podCreationTimestamp="2025-11-25 09:17:49 +0000 UTC" firstStartedPulling="2025-11-25 09:17:50.205417616 +0000 UTC m=+865.259057334" lastFinishedPulling="2025-11-25 09:17:54.856829849 +0000 UTC m=+869.910469567" observedRunningTime="2025-11-25 09:17:56.099654155 +0000 UTC m=+871.153293913" watchObservedRunningTime="2025-11-25 09:17:56.101284579 +0000 UTC m=+871.154924297" Nov 25 09:18:09 crc kubenswrapper[4687]: I1125 09:18:09.777935 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-84db77dcc8-bjgl5" Nov 25 09:18:10 crc kubenswrapper[4687]: I1125 09:18:10.378553 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jrvrv"] Nov 25 09:18:10 crc kubenswrapper[4687]: I1125 09:18:10.380178 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jrvrv" Nov 25 09:18:10 crc kubenswrapper[4687]: I1125 09:18:10.395228 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jrvrv"] Nov 25 09:18:10 crc kubenswrapper[4687]: I1125 09:18:10.557901 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7494503f-86a1-4365-92d4-91a47f6f6d03-catalog-content\") pod \"redhat-marketplace-jrvrv\" (UID: \"7494503f-86a1-4365-92d4-91a47f6f6d03\") " pod="openshift-marketplace/redhat-marketplace-jrvrv" Nov 25 09:18:10 crc kubenswrapper[4687]: I1125 09:18:10.557961 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7494503f-86a1-4365-92d4-91a47f6f6d03-utilities\") pod \"redhat-marketplace-jrvrv\" (UID: \"7494503f-86a1-4365-92d4-91a47f6f6d03\") " pod="openshift-marketplace/redhat-marketplace-jrvrv" Nov 25 09:18:10 crc kubenswrapper[4687]: I1125 09:18:10.558004 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fd696\" (UniqueName: \"kubernetes.io/projected/7494503f-86a1-4365-92d4-91a47f6f6d03-kube-api-access-fd696\") pod \"redhat-marketplace-jrvrv\" (UID: \"7494503f-86a1-4365-92d4-91a47f6f6d03\") " pod="openshift-marketplace/redhat-marketplace-jrvrv" Nov 25 09:18:10 crc kubenswrapper[4687]: I1125 09:18:10.658891 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7494503f-86a1-4365-92d4-91a47f6f6d03-catalog-content\") pod \"redhat-marketplace-jrvrv\" (UID: \"7494503f-86a1-4365-92d4-91a47f6f6d03\") " pod="openshift-marketplace/redhat-marketplace-jrvrv" Nov 25 09:18:10 crc kubenswrapper[4687]: I1125 09:18:10.658945 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7494503f-86a1-4365-92d4-91a47f6f6d03-utilities\") pod \"redhat-marketplace-jrvrv\" (UID: \"7494503f-86a1-4365-92d4-91a47f6f6d03\") " pod="openshift-marketplace/redhat-marketplace-jrvrv" Nov 25 09:18:10 crc kubenswrapper[4687]: I1125 09:18:10.658978 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fd696\" (UniqueName: \"kubernetes.io/projected/7494503f-86a1-4365-92d4-91a47f6f6d03-kube-api-access-fd696\") pod \"redhat-marketplace-jrvrv\" (UID: \"7494503f-86a1-4365-92d4-91a47f6f6d03\") " pod="openshift-marketplace/redhat-marketplace-jrvrv" Nov 25 
09:18:10 crc kubenswrapper[4687]: I1125 09:18:10.659887 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7494503f-86a1-4365-92d4-91a47f6f6d03-catalog-content\") pod \"redhat-marketplace-jrvrv\" (UID: \"7494503f-86a1-4365-92d4-91a47f6f6d03\") " pod="openshift-marketplace/redhat-marketplace-jrvrv" Nov 25 09:18:10 crc kubenswrapper[4687]: I1125 09:18:10.660232 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7494503f-86a1-4365-92d4-91a47f6f6d03-utilities\") pod \"redhat-marketplace-jrvrv\" (UID: \"7494503f-86a1-4365-92d4-91a47f6f6d03\") " pod="openshift-marketplace/redhat-marketplace-jrvrv" Nov 25 09:18:10 crc kubenswrapper[4687]: I1125 09:18:10.688753 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fd696\" (UniqueName: \"kubernetes.io/projected/7494503f-86a1-4365-92d4-91a47f6f6d03-kube-api-access-fd696\") pod \"redhat-marketplace-jrvrv\" (UID: \"7494503f-86a1-4365-92d4-91a47f6f6d03\") " pod="openshift-marketplace/redhat-marketplace-jrvrv" Nov 25 09:18:10 crc kubenswrapper[4687]: I1125 09:18:10.696695 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jrvrv" Nov 25 09:18:11 crc kubenswrapper[4687]: I1125 09:18:11.111810 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jrvrv"] Nov 25 09:18:11 crc kubenswrapper[4687]: I1125 09:18:11.175162 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jrvrv" event={"ID":"7494503f-86a1-4365-92d4-91a47f6f6d03","Type":"ContainerStarted","Data":"be6da64e6e95c109a3f8c8dfc3faec267af3e55bbc07abdee23f8bdf47789a4e"} Nov 25 09:18:12 crc kubenswrapper[4687]: I1125 09:18:12.184764 4687 generic.go:334] "Generic (PLEG): container finished" podID="7494503f-86a1-4365-92d4-91a47f6f6d03" containerID="1e6f4cfc598368d008edc55cf5d7c4431a359bb96bde07392e47bab19a92199f" exitCode=0 Nov 25 09:18:12 crc kubenswrapper[4687]: I1125 09:18:12.184836 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jrvrv" event={"ID":"7494503f-86a1-4365-92d4-91a47f6f6d03","Type":"ContainerDied","Data":"1e6f4cfc598368d008edc55cf5d7c4431a359bb96bde07392e47bab19a92199f"} Nov 25 09:18:14 crc kubenswrapper[4687]: I1125 09:18:14.109850 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jrvrv" event={"ID":"7494503f-86a1-4365-92d4-91a47f6f6d03","Type":"ContainerStarted","Data":"bc113a9b1fb534a7b8459045b9355969a78ee63668d420e5d2165f3da0916ad3"} Nov 25 09:18:15 crc kubenswrapper[4687]: I1125 09:18:15.116201 4687 generic.go:334] "Generic (PLEG): container finished" podID="7494503f-86a1-4365-92d4-91a47f6f6d03" containerID="bc113a9b1fb534a7b8459045b9355969a78ee63668d420e5d2165f3da0916ad3" exitCode=0 Nov 25 09:18:15 crc kubenswrapper[4687]: I1125 09:18:15.116268 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jrvrv" event={"ID":"7494503f-86a1-4365-92d4-91a47f6f6d03","Type":"ContainerDied","Data":"bc113a9b1fb534a7b8459045b9355969a78ee63668d420e5d2165f3da0916ad3"} Nov 25 09:18:16 crc kubenswrapper[4687]: I1125 09:18:16.124208 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jrvrv" 
event={"ID":"7494503f-86a1-4365-92d4-91a47f6f6d03","Type":"ContainerStarted","Data":"60e60b503d238cd62e5a74b56ad5ab7be88c1ad4ebea09fed3e39fc5e0914096"} Nov 25 09:18:16 crc kubenswrapper[4687]: I1125 09:18:16.150305 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jrvrv" podStartSLOduration=3.411692099 podStartE2EDuration="6.150286882s" podCreationTimestamp="2025-11-25 09:18:10 +0000 UTC" firstStartedPulling="2025-11-25 09:18:12.186837207 +0000 UTC m=+887.240476935" lastFinishedPulling="2025-11-25 09:18:14.925432 +0000 UTC m=+889.979071718" observedRunningTime="2025-11-25 09:18:16.147549137 +0000 UTC m=+891.201188875" watchObservedRunningTime="2025-11-25 09:18:16.150286882 +0000 UTC m=+891.203926600" Nov 25 09:18:20 crc kubenswrapper[4687]: I1125 09:18:20.697167 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jrvrv" Nov 25 09:18:20 crc kubenswrapper[4687]: I1125 09:18:20.697921 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jrvrv" Nov 25 09:18:20 crc kubenswrapper[4687]: I1125 09:18:20.764364 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jrvrv" Nov 25 09:18:21 crc kubenswrapper[4687]: I1125 09:18:21.190816 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jrvrv" Nov 25 09:18:21 crc kubenswrapper[4687]: I1125 09:18:21.232961 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jrvrv"] Nov 25 09:18:23 crc kubenswrapper[4687]: I1125 09:18:23.158864 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jrvrv" podUID="7494503f-86a1-4365-92d4-91a47f6f6d03" containerName="registry-server" containerID="cri-o://60e60b503d238cd62e5a74b56ad5ab7be88c1ad4ebea09fed3e39fc5e0914096" gracePeriod=2 Nov 25 09:18:23 crc kubenswrapper[4687]: I1125 09:18:23.517635 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jrvrv" Nov 25 09:18:23 crc kubenswrapper[4687]: I1125 09:18:23.715593 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fd696\" (UniqueName: \"kubernetes.io/projected/7494503f-86a1-4365-92d4-91a47f6f6d03-kube-api-access-fd696\") pod \"7494503f-86a1-4365-92d4-91a47f6f6d03\" (UID: \"7494503f-86a1-4365-92d4-91a47f6f6d03\") " Nov 25 09:18:23 crc kubenswrapper[4687]: I1125 09:18:23.715740 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7494503f-86a1-4365-92d4-91a47f6f6d03-catalog-content\") pod \"7494503f-86a1-4365-92d4-91a47f6f6d03\" (UID: \"7494503f-86a1-4365-92d4-91a47f6f6d03\") " Nov 25 09:18:23 crc kubenswrapper[4687]: I1125 09:18:23.715774 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7494503f-86a1-4365-92d4-91a47f6f6d03-utilities\") pod \"7494503f-86a1-4365-92d4-91a47f6f6d03\" (UID: \"7494503f-86a1-4365-92d4-91a47f6f6d03\") " Nov 25 09:18:23 crc kubenswrapper[4687]: I1125 09:18:23.718609 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7494503f-86a1-4365-92d4-91a47f6f6d03-utilities" (OuterVolumeSpecName: "utilities") pod "7494503f-86a1-4365-92d4-91a47f6f6d03" (UID: "7494503f-86a1-4365-92d4-91a47f6f6d03"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:18:23 crc kubenswrapper[4687]: I1125 09:18:23.722767 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7494503f-86a1-4365-92d4-91a47f6f6d03-kube-api-access-fd696" (OuterVolumeSpecName: "kube-api-access-fd696") pod "7494503f-86a1-4365-92d4-91a47f6f6d03" (UID: "7494503f-86a1-4365-92d4-91a47f6f6d03"). InnerVolumeSpecName "kube-api-access-fd696". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:18:23 crc kubenswrapper[4687]: I1125 09:18:23.738700 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7494503f-86a1-4365-92d4-91a47f6f6d03-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7494503f-86a1-4365-92d4-91a47f6f6d03" (UID: "7494503f-86a1-4365-92d4-91a47f6f6d03"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:18:23 crc kubenswrapper[4687]: I1125 09:18:23.817273 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fd696\" (UniqueName: \"kubernetes.io/projected/7494503f-86a1-4365-92d4-91a47f6f6d03-kube-api-access-fd696\") on node \"crc\" DevicePath \"\"" Nov 25 09:18:23 crc kubenswrapper[4687]: I1125 09:18:23.817314 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7494503f-86a1-4365-92d4-91a47f6f6d03-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:18:23 crc kubenswrapper[4687]: I1125 09:18:23.817327 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7494503f-86a1-4365-92d4-91a47f6f6d03-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:18:24 crc kubenswrapper[4687]: I1125 09:18:24.165342 4687 generic.go:334] "Generic (PLEG): container finished" podID="7494503f-86a1-4365-92d4-91a47f6f6d03" containerID="60e60b503d238cd62e5a74b56ad5ab7be88c1ad4ebea09fed3e39fc5e0914096" exitCode=0 Nov 25 09:18:24 crc kubenswrapper[4687]: I1125 09:18:24.165393 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jrvrv" event={"ID":"7494503f-86a1-4365-92d4-91a47f6f6d03","Type":"ContainerDied","Data":"60e60b503d238cd62e5a74b56ad5ab7be88c1ad4ebea09fed3e39fc5e0914096"} Nov 25 09:18:24 crc kubenswrapper[4687]: I1125 09:18:24.165399 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jrvrv" Nov 25 09:18:24 crc kubenswrapper[4687]: I1125 09:18:24.165424 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jrvrv" event={"ID":"7494503f-86a1-4365-92d4-91a47f6f6d03","Type":"ContainerDied","Data":"be6da64e6e95c109a3f8c8dfc3faec267af3e55bbc07abdee23f8bdf47789a4e"} Nov 25 09:18:24 crc kubenswrapper[4687]: I1125 09:18:24.165445 4687 scope.go:117] "RemoveContainer" containerID="60e60b503d238cd62e5a74b56ad5ab7be88c1ad4ebea09fed3e39fc5e0914096" Nov 25 09:18:24 crc kubenswrapper[4687]: I1125 09:18:24.182217 4687 scope.go:117] "RemoveContainer" containerID="bc113a9b1fb534a7b8459045b9355969a78ee63668d420e5d2165f3da0916ad3" Nov 25 09:18:24 crc kubenswrapper[4687]: I1125 09:18:24.215675 4687 scope.go:117] "RemoveContainer" containerID="1e6f4cfc598368d008edc55cf5d7c4431a359bb96bde07392e47bab19a92199f" Nov 25 09:18:24 crc kubenswrapper[4687]: I1125 09:18:24.218164 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jrvrv"] Nov 25 09:18:24 crc kubenswrapper[4687]: I1125 09:18:24.222576 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jrvrv"] Nov 25 09:18:24 crc kubenswrapper[4687]: I1125 09:18:24.239299 4687 scope.go:117] "RemoveContainer" containerID="60e60b503d238cd62e5a74b56ad5ab7be88c1ad4ebea09fed3e39fc5e0914096" Nov 25 09:18:24 crc kubenswrapper[4687]: E1125 09:18:24.239738 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60e60b503d238cd62e5a74b56ad5ab7be88c1ad4ebea09fed3e39fc5e0914096\": container with ID starting with 60e60b503d238cd62e5a74b56ad5ab7be88c1ad4ebea09fed3e39fc5e0914096 not found: ID does not exist" containerID="60e60b503d238cd62e5a74b56ad5ab7be88c1ad4ebea09fed3e39fc5e0914096" Nov 25 09:18:24 crc kubenswrapper[4687]: I1125 09:18:24.239774 4687 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60e60b503d238cd62e5a74b56ad5ab7be88c1ad4ebea09fed3e39fc5e0914096"} err="failed to get container status \"60e60b503d238cd62e5a74b56ad5ab7be88c1ad4ebea09fed3e39fc5e0914096\": rpc error: code = NotFound desc = could not find container \"60e60b503d238cd62e5a74b56ad5ab7be88c1ad4ebea09fed3e39fc5e0914096\": container with ID starting with 60e60b503d238cd62e5a74b56ad5ab7be88c1ad4ebea09fed3e39fc5e0914096 not found: ID does not exist" Nov 25 09:18:24 crc kubenswrapper[4687]: I1125 09:18:24.239798 4687 scope.go:117] "RemoveContainer" containerID="bc113a9b1fb534a7b8459045b9355969a78ee63668d420e5d2165f3da0916ad3" Nov 25 09:18:24 crc kubenswrapper[4687]: E1125 09:18:24.240079 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc113a9b1fb534a7b8459045b9355969a78ee63668d420e5d2165f3da0916ad3\": container with ID starting with bc113a9b1fb534a7b8459045b9355969a78ee63668d420e5d2165f3da0916ad3 not found: ID does not exist" containerID="bc113a9b1fb534a7b8459045b9355969a78ee63668d420e5d2165f3da0916ad3" Nov 25 09:18:24 crc kubenswrapper[4687]: I1125 09:18:24.240102 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc113a9b1fb534a7b8459045b9355969a78ee63668d420e5d2165f3da0916ad3"} err="failed to get container status \"bc113a9b1fb534a7b8459045b9355969a78ee63668d420e5d2165f3da0916ad3\": rpc error: code = NotFound desc = could not find container \"bc113a9b1fb534a7b8459045b9355969a78ee63668d420e5d2165f3da0916ad3\": container with ID starting with bc113a9b1fb534a7b8459045b9355969a78ee63668d420e5d2165f3da0916ad3 not found: ID does not exist" Nov 25 09:18:24 crc kubenswrapper[4687]: I1125 09:18:24.240118 4687 scope.go:117] "RemoveContainer" containerID="1e6f4cfc598368d008edc55cf5d7c4431a359bb96bde07392e47bab19a92199f" Nov 25 09:18:24 crc kubenswrapper[4687]: E1125 09:18:24.240356 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e6f4cfc598368d008edc55cf5d7c4431a359bb96bde07392e47bab19a92199f\": container with ID starting with 1e6f4cfc598368d008edc55cf5d7c4431a359bb96bde07392e47bab19a92199f not found: ID does not exist" containerID="1e6f4cfc598368d008edc55cf5d7c4431a359bb96bde07392e47bab19a92199f" Nov 25 09:18:24 crc kubenswrapper[4687]: I1125 09:18:24.240381 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e6f4cfc598368d008edc55cf5d7c4431a359bb96bde07392e47bab19a92199f"} err="failed to get container status \"1e6f4cfc598368d008edc55cf5d7c4431a359bb96bde07392e47bab19a92199f\": rpc error: code = NotFound desc = could not find container \"1e6f4cfc598368d008edc55cf5d7c4431a359bb96bde07392e47bab19a92199f\": container with ID starting with 1e6f4cfc598368d008edc55cf5d7c4431a359bb96bde07392e47bab19a92199f not found: ID does not exist" Nov 25 09:18:25 crc kubenswrapper[4687]: I1125 09:18:25.747364 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7494503f-86a1-4365-92d4-91a47f6f6d03" path="/var/lib/kubelet/pods/7494503f-86a1-4365-92d4-91a47f6f6d03/volumes" Nov 25 09:18:29 crc kubenswrapper[4687]: I1125 09:18:29.402384 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-558db5dd86-fc58n" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.231409 4687 kubelet.go:2421] "SyncLoop 
ADD" source="api" pods=["metallb-system/frr-k8s-cf84d"] Nov 25 09:18:30 crc kubenswrapper[4687]: E1125 09:18:30.231804 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7494503f-86a1-4365-92d4-91a47f6f6d03" containerName="extract-utilities" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.231827 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="7494503f-86a1-4365-92d4-91a47f6f6d03" containerName="extract-utilities" Nov 25 09:18:30 crc kubenswrapper[4687]: E1125 09:18:30.231846 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7494503f-86a1-4365-92d4-91a47f6f6d03" containerName="extract-content" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.231858 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="7494503f-86a1-4365-92d4-91a47f6f6d03" containerName="extract-content" Nov 25 09:18:30 crc kubenswrapper[4687]: E1125 09:18:30.231879 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7494503f-86a1-4365-92d4-91a47f6f6d03" containerName="registry-server" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.231893 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="7494503f-86a1-4365-92d4-91a47f6f6d03" containerName="registry-server" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.232073 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="7494503f-86a1-4365-92d4-91a47f6f6d03" containerName="registry-server" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.235891 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-cf84d" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.238999 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-ddrgp"] Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.239661 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-ddrgp" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.240072 4687 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.240344 4687 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-86gwq" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.240696 4687 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.241436 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.253064 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-ddrgp"] Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.326442 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-q7pt2"] Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.327307 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-q7pt2" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.329517 4687 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.329534 4687 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.329564 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.331757 4687 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-4z69p" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.353097 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-6jtwr"] Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.354348 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-6jtwr" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.356707 4687 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.363113 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-6jtwr"] Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.400336 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lxjh\" (UniqueName: \"kubernetes.io/projected/eb69d750-834f-4728-8a20-f37dc1195e86-kube-api-access-9lxjh\") pod \"controller-6c7b4b5f48-6jtwr\" (UID: \"eb69d750-834f-4728-8a20-f37dc1195e86\") " pod="metallb-system/controller-6c7b4b5f48-6jtwr" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.400388 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-np8hq\" (UniqueName: \"kubernetes.io/projected/633f6bd8-eb2e-485d-8ef6-67800b34f877-kube-api-access-np8hq\") pod \"frr-k8s-cf84d\" (UID: \"633f6bd8-eb2e-485d-8ef6-67800b34f877\") " pod="metallb-system/frr-k8s-cf84d" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.400409 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/633f6bd8-eb2e-485d-8ef6-67800b34f877-frr-startup\") pod \"frr-k8s-cf84d\" (UID: \"633f6bd8-eb2e-485d-8ef6-67800b34f877\") " pod="metallb-system/frr-k8s-cf84d" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.400427 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/eb69d750-834f-4728-8a20-f37dc1195e86-cert\") pod \"controller-6c7b4b5f48-6jtwr\" (UID: \"eb69d750-834f-4728-8a20-f37dc1195e86\") " pod="metallb-system/controller-6c7b4b5f48-6jtwr" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.400444 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/633f6bd8-eb2e-485d-8ef6-67800b34f877-reloader\") pod \"frr-k8s-cf84d\" (UID: \"633f6bd8-eb2e-485d-8ef6-67800b34f877\") " pod="metallb-system/frr-k8s-cf84d" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.400475 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"cert\" (UniqueName: \"kubernetes.io/secret/b9abb225-82ca-44ea-a30c-ec214deb3316-cert\") pod \"frr-k8s-webhook-server-6998585d5-ddrgp\" (UID: \"b9abb225-82ca-44ea-a30c-ec214deb3316\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-ddrgp" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.400572 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/a2ef7e49-e737-462f-8ff8-b045611d5baf-metallb-excludel2\") pod \"speaker-q7pt2\" (UID: \"a2ef7e49-e737-462f-8ff8-b045611d5baf\") " pod="metallb-system/speaker-q7pt2" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.401100 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmbnn\" (UniqueName: \"kubernetes.io/projected/b9abb225-82ca-44ea-a30c-ec214deb3316-kube-api-access-qmbnn\") pod \"frr-k8s-webhook-server-6998585d5-ddrgp\" (UID: \"b9abb225-82ca-44ea-a30c-ec214deb3316\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-ddrgp" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.408268 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/eb69d750-834f-4728-8a20-f37dc1195e86-metrics-certs\") pod \"controller-6c7b4b5f48-6jtwr\" (UID: \"eb69d750-834f-4728-8a20-f37dc1195e86\") " pod="metallb-system/controller-6c7b4b5f48-6jtwr" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.408334 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/633f6bd8-eb2e-485d-8ef6-67800b34f877-frr-conf\") pod \"frr-k8s-cf84d\" (UID: \"633f6bd8-eb2e-485d-8ef6-67800b34f877\") " pod="metallb-system/frr-k8s-cf84d" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.408357 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/633f6bd8-eb2e-485d-8ef6-67800b34f877-metrics-certs\") pod \"frr-k8s-cf84d\" (UID: \"633f6bd8-eb2e-485d-8ef6-67800b34f877\") " pod="metallb-system/frr-k8s-cf84d" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.408431 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/633f6bd8-eb2e-485d-8ef6-67800b34f877-frr-sockets\") pod \"frr-k8s-cf84d\" (UID: \"633f6bd8-eb2e-485d-8ef6-67800b34f877\") " pod="metallb-system/frr-k8s-cf84d" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.408481 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a2ef7e49-e737-462f-8ff8-b045611d5baf-metrics-certs\") pod \"speaker-q7pt2\" (UID: \"a2ef7e49-e737-462f-8ff8-b045611d5baf\") " pod="metallb-system/speaker-q7pt2" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.408549 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/a2ef7e49-e737-462f-8ff8-b045611d5baf-memberlist\") pod \"speaker-q7pt2\" (UID: \"a2ef7e49-e737-462f-8ff8-b045611d5baf\") " pod="metallb-system/speaker-q7pt2" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.408574 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-m5vvj\" (UniqueName: \"kubernetes.io/projected/a2ef7e49-e737-462f-8ff8-b045611d5baf-kube-api-access-m5vvj\") pod \"speaker-q7pt2\" (UID: \"a2ef7e49-e737-462f-8ff8-b045611d5baf\") " pod="metallb-system/speaker-q7pt2" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.408596 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/633f6bd8-eb2e-485d-8ef6-67800b34f877-metrics\") pod \"frr-k8s-cf84d\" (UID: \"633f6bd8-eb2e-485d-8ef6-67800b34f877\") " pod="metallb-system/frr-k8s-cf84d" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.509403 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/633f6bd8-eb2e-485d-8ef6-67800b34f877-frr-conf\") pod \"frr-k8s-cf84d\" (UID: \"633f6bd8-eb2e-485d-8ef6-67800b34f877\") " pod="metallb-system/frr-k8s-cf84d" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.509466 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/633f6bd8-eb2e-485d-8ef6-67800b34f877-metrics-certs\") pod \"frr-k8s-cf84d\" (UID: \"633f6bd8-eb2e-485d-8ef6-67800b34f877\") " pod="metallb-system/frr-k8s-cf84d" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.509545 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/633f6bd8-eb2e-485d-8ef6-67800b34f877-frr-sockets\") pod \"frr-k8s-cf84d\" (UID: \"633f6bd8-eb2e-485d-8ef6-67800b34f877\") " pod="metallb-system/frr-k8s-cf84d" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.509573 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a2ef7e49-e737-462f-8ff8-b045611d5baf-metrics-certs\") pod \"speaker-q7pt2\" (UID: \"a2ef7e49-e737-462f-8ff8-b045611d5baf\") " pod="metallb-system/speaker-q7pt2" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.509589 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/a2ef7e49-e737-462f-8ff8-b045611d5baf-memberlist\") pod \"speaker-q7pt2\" (UID: \"a2ef7e49-e737-462f-8ff8-b045611d5baf\") " pod="metallb-system/speaker-q7pt2" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.509619 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5vvj\" (UniqueName: \"kubernetes.io/projected/a2ef7e49-e737-462f-8ff8-b045611d5baf-kube-api-access-m5vvj\") pod \"speaker-q7pt2\" (UID: \"a2ef7e49-e737-462f-8ff8-b045611d5baf\") " pod="metallb-system/speaker-q7pt2" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.509640 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/633f6bd8-eb2e-485d-8ef6-67800b34f877-metrics\") pod \"frr-k8s-cf84d\" (UID: \"633f6bd8-eb2e-485d-8ef6-67800b34f877\") " pod="metallb-system/frr-k8s-cf84d" Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.509661 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9lxjh\" (UniqueName: \"kubernetes.io/projected/eb69d750-834f-4728-8a20-f37dc1195e86-kube-api-access-9lxjh\") pod \"controller-6c7b4b5f48-6jtwr\" (UID: \"eb69d750-834f-4728-8a20-f37dc1195e86\") " pod="metallb-system/controller-6c7b4b5f48-6jtwr" Nov 25 09:18:30 crc 
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.509697 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/633f6bd8-eb2e-485d-8ef6-67800b34f877-frr-startup\") pod \"frr-k8s-cf84d\" (UID: \"633f6bd8-eb2e-485d-8ef6-67800b34f877\") " pod="metallb-system/frr-k8s-cf84d"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.509713 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/eb69d750-834f-4728-8a20-f37dc1195e86-cert\") pod \"controller-6c7b4b5f48-6jtwr\" (UID: \"eb69d750-834f-4728-8a20-f37dc1195e86\") " pod="metallb-system/controller-6c7b4b5f48-6jtwr"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.509731 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/633f6bd8-eb2e-485d-8ef6-67800b34f877-reloader\") pod \"frr-k8s-cf84d\" (UID: \"633f6bd8-eb2e-485d-8ef6-67800b34f877\") " pod="metallb-system/frr-k8s-cf84d"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.509748 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b9abb225-82ca-44ea-a30c-ec214deb3316-cert\") pod \"frr-k8s-webhook-server-6998585d5-ddrgp\" (UID: \"b9abb225-82ca-44ea-a30c-ec214deb3316\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-ddrgp"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.509773 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/a2ef7e49-e737-462f-8ff8-b045611d5baf-metallb-excludel2\") pod \"speaker-q7pt2\" (UID: \"a2ef7e49-e737-462f-8ff8-b045611d5baf\") " pod="metallb-system/speaker-q7pt2"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.509798 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmbnn\" (UniqueName: \"kubernetes.io/projected/b9abb225-82ca-44ea-a30c-ec214deb3316-kube-api-access-qmbnn\") pod \"frr-k8s-webhook-server-6998585d5-ddrgp\" (UID: \"b9abb225-82ca-44ea-a30c-ec214deb3316\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-ddrgp"
Nov 25 09:18:30 crc kubenswrapper[4687]: E1125 09:18:30.509804 4687 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Nov 25 09:18:30 crc kubenswrapper[4687]: E1125 09:18:30.509870 4687 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found
Nov 25 09:18:30 crc kubenswrapper[4687]: E1125 09:18:30.509901 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a2ef7e49-e737-462f-8ff8-b045611d5baf-memberlist podName:a2ef7e49-e737-462f-8ff8-b045611d5baf nodeName:}" failed. No retries permitted until 2025-11-25 09:18:31.009880262 +0000 UTC m=+906.063519980 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/a2ef7e49-e737-462f-8ff8-b045611d5baf-memberlist") pod "speaker-q7pt2" (UID: "a2ef7e49-e737-462f-8ff8-b045611d5baf") : secret "metallb-memberlist" not found
Nov 25 09:18:30 crc kubenswrapper[4687]: E1125 09:18:30.509981 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a2ef7e49-e737-462f-8ff8-b045611d5baf-metrics-certs podName:a2ef7e49-e737-462f-8ff8-b045611d5baf nodeName:}" failed. No retries permitted until 2025-11-25 09:18:31.009957644 +0000 UTC m=+906.063597362 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a2ef7e49-e737-462f-8ff8-b045611d5baf-metrics-certs") pod "speaker-q7pt2" (UID: "a2ef7e49-e737-462f-8ff8-b045611d5baf") : secret "speaker-certs-secret" not found
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.509817 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/eb69d750-834f-4728-8a20-f37dc1195e86-metrics-certs\") pod \"controller-6c7b4b5f48-6jtwr\" (UID: \"eb69d750-834f-4728-8a20-f37dc1195e86\") " pod="metallb-system/controller-6c7b4b5f48-6jtwr"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.510384 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/633f6bd8-eb2e-485d-8ef6-67800b34f877-reloader\") pod \"frr-k8s-cf84d\" (UID: \"633f6bd8-eb2e-485d-8ef6-67800b34f877\") " pod="metallb-system/frr-k8s-cf84d"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.510428 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/633f6bd8-eb2e-485d-8ef6-67800b34f877-metrics\") pod \"frr-k8s-cf84d\" (UID: \"633f6bd8-eb2e-485d-8ef6-67800b34f877\") " pod="metallb-system/frr-k8s-cf84d"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.510748 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/633f6bd8-eb2e-485d-8ef6-67800b34f877-frr-conf\") pod \"frr-k8s-cf84d\" (UID: \"633f6bd8-eb2e-485d-8ef6-67800b34f877\") " pod="metallb-system/frr-k8s-cf84d"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.510824 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/a2ef7e49-e737-462f-8ff8-b045611d5baf-metallb-excludel2\") pod \"speaker-q7pt2\" (UID: \"a2ef7e49-e737-462f-8ff8-b045611d5baf\") " pod="metallb-system/speaker-q7pt2"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.510995 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/633f6bd8-eb2e-485d-8ef6-67800b34f877-frr-startup\") pod \"frr-k8s-cf84d\" (UID: \"633f6bd8-eb2e-485d-8ef6-67800b34f877\") " pod="metallb-system/frr-k8s-cf84d"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.511328 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/633f6bd8-eb2e-485d-8ef6-67800b34f877-frr-sockets\") pod \"frr-k8s-cf84d\" (UID: \"633f6bd8-eb2e-485d-8ef6-67800b34f877\") " pod="metallb-system/frr-k8s-cf84d"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.513665 4687 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.525824 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/633f6bd8-eb2e-485d-8ef6-67800b34f877-metrics-certs\") pod \"frr-k8s-cf84d\" (UID: \"633f6bd8-eb2e-485d-8ef6-67800b34f877\") " pod="metallb-system/frr-k8s-cf84d"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.526048 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b9abb225-82ca-44ea-a30c-ec214deb3316-cert\") pod \"frr-k8s-webhook-server-6998585d5-ddrgp\" (UID: \"b9abb225-82ca-44ea-a30c-ec214deb3316\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-ddrgp"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.528059 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/eb69d750-834f-4728-8a20-f37dc1195e86-cert\") pod \"controller-6c7b4b5f48-6jtwr\" (UID: \"eb69d750-834f-4728-8a20-f37dc1195e86\") " pod="metallb-system/controller-6c7b4b5f48-6jtwr"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.530420 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5vvj\" (UniqueName: \"kubernetes.io/projected/a2ef7e49-e737-462f-8ff8-b045611d5baf-kube-api-access-m5vvj\") pod \"speaker-q7pt2\" (UID: \"a2ef7e49-e737-462f-8ff8-b045611d5baf\") " pod="metallb-system/speaker-q7pt2"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.530543 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmbnn\" (UniqueName: \"kubernetes.io/projected/b9abb225-82ca-44ea-a30c-ec214deb3316-kube-api-access-qmbnn\") pod \"frr-k8s-webhook-server-6998585d5-ddrgp\" (UID: \"b9abb225-82ca-44ea-a30c-ec214deb3316\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-ddrgp"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.530921 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-np8hq\" (UniqueName: \"kubernetes.io/projected/633f6bd8-eb2e-485d-8ef6-67800b34f877-kube-api-access-np8hq\") pod \"frr-k8s-cf84d\" (UID: \"633f6bd8-eb2e-485d-8ef6-67800b34f877\") " pod="metallb-system/frr-k8s-cf84d"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.533001 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9lxjh\" (UniqueName: \"kubernetes.io/projected/eb69d750-834f-4728-8a20-f37dc1195e86-kube-api-access-9lxjh\") pod \"controller-6c7b4b5f48-6jtwr\" (UID: \"eb69d750-834f-4728-8a20-f37dc1195e86\") " pod="metallb-system/controller-6c7b4b5f48-6jtwr"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.533140 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/eb69d750-834f-4728-8a20-f37dc1195e86-metrics-certs\") pod \"controller-6c7b4b5f48-6jtwr\" (UID: \"eb69d750-834f-4728-8a20-f37dc1195e86\") " pod="metallb-system/controller-6c7b4b5f48-6jtwr"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.553879 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-cf84d"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.561241 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-ddrgp"
Nov 25 09:18:30 crc kubenswrapper[4687]: I1125 09:18:30.667738 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-6jtwr"
Nov 25 09:18:31 crc kubenswrapper[4687]: I1125 09:18:31.017713 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-ddrgp"]
Nov 25 09:18:31 crc kubenswrapper[4687]: I1125 09:18:31.018103 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a2ef7e49-e737-462f-8ff8-b045611d5baf-metrics-certs\") pod \"speaker-q7pt2\" (UID: \"a2ef7e49-e737-462f-8ff8-b045611d5baf\") " pod="metallb-system/speaker-q7pt2"
Nov 25 09:18:31 crc kubenswrapper[4687]: I1125 09:18:31.018236 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/a2ef7e49-e737-462f-8ff8-b045611d5baf-memberlist\") pod \"speaker-q7pt2\" (UID: \"a2ef7e49-e737-462f-8ff8-b045611d5baf\") " pod="metallb-system/speaker-q7pt2"
Nov 25 09:18:31 crc kubenswrapper[4687]: E1125 09:18:31.018454 4687 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Nov 25 09:18:31 crc kubenswrapper[4687]: E1125 09:18:31.018568 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a2ef7e49-e737-462f-8ff8-b045611d5baf-memberlist podName:a2ef7e49-e737-462f-8ff8-b045611d5baf nodeName:}" failed. No retries permitted until 2025-11-25 09:18:32.018545164 +0000 UTC m=+907.072184892 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/a2ef7e49-e737-462f-8ff8-b045611d5baf-memberlist") pod "speaker-q7pt2" (UID: "a2ef7e49-e737-462f-8ff8-b045611d5baf") : secret "metallb-memberlist" not found
Nov 25 09:18:31 crc kubenswrapper[4687]: W1125 09:18:31.021279 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9abb225_82ca_44ea_a30c_ec214deb3316.slice/crio-4c4fa3fa62ba24d16548d0854f1b6228d816b7c0bce452649067a42482d4c9fd WatchSource:0}: Error finding container 4c4fa3fa62ba24d16548d0854f1b6228d816b7c0bce452649067a42482d4c9fd: Status 404 returned error can't find the container with id 4c4fa3fa62ba24d16548d0854f1b6228d816b7c0bce452649067a42482d4c9fd
Nov 25 09:18:31 crc kubenswrapper[4687]: I1125 09:18:31.026119 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a2ef7e49-e737-462f-8ff8-b045611d5baf-metrics-certs\") pod \"speaker-q7pt2\" (UID: \"a2ef7e49-e737-462f-8ff8-b045611d5baf\") " pod="metallb-system/speaker-q7pt2"
Nov 25 09:18:31 crc kubenswrapper[4687]: I1125 09:18:31.101212 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-6jtwr"]
Nov 25 09:18:31 crc kubenswrapper[4687]: W1125 09:18:31.106217 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeb69d750_834f_4728_8a20_f37dc1195e86.slice/crio-985fe8f5b0228f3d700281d9077b9895181081a8d69f7c2a1937c9cb725bbc50 WatchSource:0}: Error finding container 985fe8f5b0228f3d700281d9077b9895181081a8d69f7c2a1937c9cb725bbc50: Status 404 returned error can't find the container with id 985fe8f5b0228f3d700281d9077b9895181081a8d69f7c2a1937c9cb725bbc50
Nov 25 09:18:31 crc kubenswrapper[4687]: I1125 09:18:31.208276 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-6jtwr" event={"ID":"eb69d750-834f-4728-8a20-f37dc1195e86","Type":"ContainerStarted","Data":"985fe8f5b0228f3d700281d9077b9895181081a8d69f7c2a1937c9cb725bbc50"}
Nov 25 09:18:31 crc kubenswrapper[4687]: I1125 09:18:31.209860 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-ddrgp" event={"ID":"b9abb225-82ca-44ea-a30c-ec214deb3316","Type":"ContainerStarted","Data":"4c4fa3fa62ba24d16548d0854f1b6228d816b7c0bce452649067a42482d4c9fd"}
Nov 25 09:18:31 crc kubenswrapper[4687]: I1125 09:18:31.210909 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-cf84d" event={"ID":"633f6bd8-eb2e-485d-8ef6-67800b34f877","Type":"ContainerStarted","Data":"0ef53e81ca349d1c7c595f28f63ebdae0ca80a91f8362dd13269a0dbeafc2dad"}
Nov 25 09:18:32 crc kubenswrapper[4687]: I1125 09:18:32.035766 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/a2ef7e49-e737-462f-8ff8-b045611d5baf-memberlist\") pod \"speaker-q7pt2\" (UID: \"a2ef7e49-e737-462f-8ff8-b045611d5baf\") " pod="metallb-system/speaker-q7pt2"
Nov 25 09:18:32 crc kubenswrapper[4687]: I1125 09:18:32.052385 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/a2ef7e49-e737-462f-8ff8-b045611d5baf-memberlist\") pod \"speaker-q7pt2\" (UID: \"a2ef7e49-e737-462f-8ff8-b045611d5baf\") " pod="metallb-system/speaker-q7pt2"
Nov 25 09:18:32 crc kubenswrapper[4687]: I1125 09:18:32.141404 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-q7pt2"
Nov 25 09:18:32 crc kubenswrapper[4687]: W1125 09:18:32.178348 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda2ef7e49_e737_462f_8ff8_b045611d5baf.slice/crio-cf88039c2bfc41c7a5db5ecfdc245ae04e2750f8dc266536777e2dffde61645b WatchSource:0}: Error finding container cf88039c2bfc41c7a5db5ecfdc245ae04e2750f8dc266536777e2dffde61645b: Status 404 returned error can't find the container with id cf88039c2bfc41c7a5db5ecfdc245ae04e2750f8dc266536777e2dffde61645b
Nov 25 09:18:32 crc kubenswrapper[4687]: I1125 09:18:32.238356 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-6jtwr" event={"ID":"eb69d750-834f-4728-8a20-f37dc1195e86","Type":"ContainerStarted","Data":"215a47cbcfeaf65f5b863e87a241a2c451681dba9c8583cf437996d3ebd6fd88"}
Nov 25 09:18:32 crc kubenswrapper[4687]: I1125 09:18:32.238408 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-6jtwr" event={"ID":"eb69d750-834f-4728-8a20-f37dc1195e86","Type":"ContainerStarted","Data":"15780e534462157549fa1f623544d188c1f47d50c55153912fc8f249eaf08352"}
Nov 25 09:18:32 crc kubenswrapper[4687]: I1125 09:18:32.238725 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-6jtwr"
Nov 25 09:18:32 crc kubenswrapper[4687]: I1125 09:18:32.251148 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-q7pt2" event={"ID":"a2ef7e49-e737-462f-8ff8-b045611d5baf","Type":"ContainerStarted","Data":"cf88039c2bfc41c7a5db5ecfdc245ae04e2750f8dc266536777e2dffde61645b"}
Nov 25 09:18:33 crc kubenswrapper[4687]: I1125 09:18:33.261548 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-q7pt2" event={"ID":"a2ef7e49-e737-462f-8ff8-b045611d5baf","Type":"ContainerStarted","Data":"3f5308db4625356e1fa8a2467091177451403d58ff6c474b328c1c516efb1a78"}
event={"ID":"a2ef7e49-e737-462f-8ff8-b045611d5baf","Type":"ContainerStarted","Data":"3f5308db4625356e1fa8a2467091177451403d58ff6c474b328c1c516efb1a78"} Nov 25 09:18:33 crc kubenswrapper[4687]: I1125 09:18:33.261592 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-q7pt2" event={"ID":"a2ef7e49-e737-462f-8ff8-b045611d5baf","Type":"ContainerStarted","Data":"7d8d56e1e75abbca329d442196150454becf773aef0b8b08bc0b2d816f0a3a4d"} Nov 25 09:18:33 crc kubenswrapper[4687]: I1125 09:18:33.261888 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-q7pt2" Nov 25 09:18:33 crc kubenswrapper[4687]: I1125 09:18:33.279213 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-q7pt2" podStartSLOduration=3.279196438 podStartE2EDuration="3.279196438s" podCreationTimestamp="2025-11-25 09:18:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:18:33.278085347 +0000 UTC m=+908.331725065" watchObservedRunningTime="2025-11-25 09:18:33.279196438 +0000 UTC m=+908.332836146" Nov 25 09:18:33 crc kubenswrapper[4687]: I1125 09:18:33.281007 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-6jtwr" podStartSLOduration=3.281000807 podStartE2EDuration="3.281000807s" podCreationTimestamp="2025-11-25 09:18:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:18:32.27271461 +0000 UTC m=+907.326354328" watchObservedRunningTime="2025-11-25 09:18:33.281000807 +0000 UTC m=+908.334640525" Nov 25 09:18:38 crc kubenswrapper[4687]: I1125 09:18:38.291232 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-ddrgp" event={"ID":"b9abb225-82ca-44ea-a30c-ec214deb3316","Type":"ContainerStarted","Data":"da376dd4a166fe52d3b93722c7e94af859a00f5f9282bf70b85bda6748ff09c0"} Nov 25 09:18:38 crc kubenswrapper[4687]: I1125 09:18:38.292754 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-ddrgp" Nov 25 09:18:38 crc kubenswrapper[4687]: I1125 09:18:38.293476 4687 generic.go:334] "Generic (PLEG): container finished" podID="633f6bd8-eb2e-485d-8ef6-67800b34f877" containerID="77be78e1b27549480e35627f7d393605a7d3a62dda9e76c5457efc557fcb743d" exitCode=0 Nov 25 09:18:38 crc kubenswrapper[4687]: I1125 09:18:38.293546 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-cf84d" event={"ID":"633f6bd8-eb2e-485d-8ef6-67800b34f877","Type":"ContainerDied","Data":"77be78e1b27549480e35627f7d393605a7d3a62dda9e76c5457efc557fcb743d"} Nov 25 09:18:38 crc kubenswrapper[4687]: I1125 09:18:38.319611 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-ddrgp" podStartSLOduration=2.097640813 podStartE2EDuration="8.319588503s" podCreationTimestamp="2025-11-25 09:18:30 +0000 UTC" firstStartedPulling="2025-11-25 09:18:31.024287432 +0000 UTC m=+906.077927170" lastFinishedPulling="2025-11-25 09:18:37.246235142 +0000 UTC m=+912.299874860" observedRunningTime="2025-11-25 09:18:38.312535469 +0000 UTC m=+913.366175237" watchObservedRunningTime="2025-11-25 09:18:38.319588503 +0000 UTC m=+913.373228241" Nov 25 09:18:39 crc kubenswrapper[4687]: I1125 09:18:39.300364 4687 generic.go:334] 
"Generic (PLEG): container finished" podID="633f6bd8-eb2e-485d-8ef6-67800b34f877" containerID="0f6d744769a42bfe922a706cb3f2e7acbaaa2c72c369671b762978a7fe21042e" exitCode=0 Nov 25 09:18:39 crc kubenswrapper[4687]: I1125 09:18:39.300716 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-cf84d" event={"ID":"633f6bd8-eb2e-485d-8ef6-67800b34f877","Type":"ContainerDied","Data":"0f6d744769a42bfe922a706cb3f2e7acbaaa2c72c369671b762978a7fe21042e"} Nov 25 09:18:40 crc kubenswrapper[4687]: I1125 09:18:40.308425 4687 generic.go:334] "Generic (PLEG): container finished" podID="633f6bd8-eb2e-485d-8ef6-67800b34f877" containerID="f7711c5350b9041435ef6f7e8b4097b735dccfc766106e10b6ee7307b541e791" exitCode=0 Nov 25 09:18:40 crc kubenswrapper[4687]: I1125 09:18:40.308478 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-cf84d" event={"ID":"633f6bd8-eb2e-485d-8ef6-67800b34f877","Type":"ContainerDied","Data":"f7711c5350b9041435ef6f7e8b4097b735dccfc766106e10b6ee7307b541e791"} Nov 25 09:18:41 crc kubenswrapper[4687]: I1125 09:18:41.320266 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-cf84d" event={"ID":"633f6bd8-eb2e-485d-8ef6-67800b34f877","Type":"ContainerStarted","Data":"227a18a5ef2156dd026f8d638acaa92949c25e33debf99af008141d64d06e597"} Nov 25 09:18:41 crc kubenswrapper[4687]: I1125 09:18:41.320628 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-cf84d" event={"ID":"633f6bd8-eb2e-485d-8ef6-67800b34f877","Type":"ContainerStarted","Data":"2d2e144b66a798296ad3293d4e98e5dafc28261bf68a59f6e07a6a0d2af3c0e5"} Nov 25 09:18:41 crc kubenswrapper[4687]: I1125 09:18:41.320761 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-cf84d" Nov 25 09:18:41 crc kubenswrapper[4687]: I1125 09:18:41.320780 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-cf84d" event={"ID":"633f6bd8-eb2e-485d-8ef6-67800b34f877","Type":"ContainerStarted","Data":"843610f51a5e1c9bf01e1196b6c49a609e64b7e8c96202f6a1d63393104fe905"} Nov 25 09:18:41 crc kubenswrapper[4687]: I1125 09:18:41.320791 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-cf84d" event={"ID":"633f6bd8-eb2e-485d-8ef6-67800b34f877","Type":"ContainerStarted","Data":"2c5cd7198fe6e413a642b1c44221f3b3919497cc0d0dc5080f3ca1280fe36c04"} Nov 25 09:18:41 crc kubenswrapper[4687]: I1125 09:18:41.320799 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-cf84d" event={"ID":"633f6bd8-eb2e-485d-8ef6-67800b34f877","Type":"ContainerStarted","Data":"f6f49482e90d7d6123040be58714af41c6e4e2c684e4e2878cafc232a61be553"} Nov 25 09:18:41 crc kubenswrapper[4687]: I1125 09:18:41.320808 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-cf84d" event={"ID":"633f6bd8-eb2e-485d-8ef6-67800b34f877","Type":"ContainerStarted","Data":"1323404e76f98321dc5a95356f0e4aa63ade1850ddcb05d870448aec11c1adbc"} Nov 25 09:18:41 crc kubenswrapper[4687]: I1125 09:18:41.346245 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-cf84d" podStartSLOduration=4.8000522839999995 podStartE2EDuration="11.346228022s" podCreationTimestamp="2025-11-25 09:18:30 +0000 UTC" firstStartedPulling="2025-11-25 09:18:30.735427644 +0000 UTC m=+905.789067352" lastFinishedPulling="2025-11-25 09:18:37.281603372 +0000 UTC m=+912.335243090" observedRunningTime="2025-11-25 09:18:41.344236106 +0000 UTC 
m=+916.397875824" watchObservedRunningTime="2025-11-25 09:18:41.346228022 +0000 UTC m=+916.399867740" Nov 25 09:18:42 crc kubenswrapper[4687]: I1125 09:18:42.145482 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-q7pt2" Nov 25 09:18:45 crc kubenswrapper[4687]: I1125 09:18:45.259786 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-86jr7"] Nov 25 09:18:45 crc kubenswrapper[4687]: I1125 09:18:45.260822 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-86jr7" Nov 25 09:18:45 crc kubenswrapper[4687]: I1125 09:18:45.263670 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-xkhb7" Nov 25 09:18:45 crc kubenswrapper[4687]: I1125 09:18:45.264351 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 25 09:18:45 crc kubenswrapper[4687]: I1125 09:18:45.264398 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 25 09:18:45 crc kubenswrapper[4687]: I1125 09:18:45.281385 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-86jr7"] Nov 25 09:18:45 crc kubenswrapper[4687]: I1125 09:18:45.428430 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6hbm\" (UniqueName: \"kubernetes.io/projected/46bc03ee-6170-475d-937a-ddc05a06c2b0-kube-api-access-f6hbm\") pod \"openstack-operator-index-86jr7\" (UID: \"46bc03ee-6170-475d-937a-ddc05a06c2b0\") " pod="openstack-operators/openstack-operator-index-86jr7" Nov 25 09:18:45 crc kubenswrapper[4687]: I1125 09:18:45.530192 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6hbm\" (UniqueName: \"kubernetes.io/projected/46bc03ee-6170-475d-937a-ddc05a06c2b0-kube-api-access-f6hbm\") pod \"openstack-operator-index-86jr7\" (UID: \"46bc03ee-6170-475d-937a-ddc05a06c2b0\") " pod="openstack-operators/openstack-operator-index-86jr7" Nov 25 09:18:45 crc kubenswrapper[4687]: I1125 09:18:45.550192 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6hbm\" (UniqueName: \"kubernetes.io/projected/46bc03ee-6170-475d-937a-ddc05a06c2b0-kube-api-access-f6hbm\") pod \"openstack-operator-index-86jr7\" (UID: \"46bc03ee-6170-475d-937a-ddc05a06c2b0\") " pod="openstack-operators/openstack-operator-index-86jr7" Nov 25 09:18:45 crc kubenswrapper[4687]: I1125 09:18:45.554092 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-cf84d" Nov 25 09:18:45 crc kubenswrapper[4687]: I1125 09:18:45.589237 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-86jr7" Nov 25 09:18:45 crc kubenswrapper[4687]: I1125 09:18:45.608032 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-cf84d" Nov 25 09:18:45 crc kubenswrapper[4687]: I1125 09:18:45.809469 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-86jr7"] Nov 25 09:18:46 crc kubenswrapper[4687]: I1125 09:18:46.352490 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-86jr7" event={"ID":"46bc03ee-6170-475d-937a-ddc05a06c2b0","Type":"ContainerStarted","Data":"24725343de05650471ccc89fa1a527fd304253253a2b43e92412fd0f807be51e"} Nov 25 09:18:48 crc kubenswrapper[4687]: I1125 09:18:48.370785 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-86jr7" event={"ID":"46bc03ee-6170-475d-937a-ddc05a06c2b0","Type":"ContainerStarted","Data":"ab8859a6624ae5f1c9d017a0f2c3f5d4636d8befc8ef0c8cf5ed97cc6188a186"} Nov 25 09:18:48 crc kubenswrapper[4687]: I1125 09:18:48.390024 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-86jr7" podStartSLOduration=1.169930236 podStartE2EDuration="3.389998697s" podCreationTimestamp="2025-11-25 09:18:45 +0000 UTC" firstStartedPulling="2025-11-25 09:18:45.818809073 +0000 UTC m=+920.872448791" lastFinishedPulling="2025-11-25 09:18:48.038877534 +0000 UTC m=+923.092517252" observedRunningTime="2025-11-25 09:18:48.384747854 +0000 UTC m=+923.438387612" watchObservedRunningTime="2025-11-25 09:18:48.389998697 +0000 UTC m=+923.443638415" Nov 25 09:18:48 crc kubenswrapper[4687]: I1125 09:18:48.638229 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-86jr7"] Nov 25 09:18:49 crc kubenswrapper[4687]: I1125 09:18:49.244886 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-xg64b"] Nov 25 09:18:49 crc kubenswrapper[4687]: I1125 09:18:49.246075 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-xg64b" Nov 25 09:18:49 crc kubenswrapper[4687]: I1125 09:18:49.252128 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-xg64b"] Nov 25 09:18:49 crc kubenswrapper[4687]: I1125 09:18:49.381822 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbgqb\" (UniqueName: \"kubernetes.io/projected/fabb562b-35a6-4e1d-bdd5-5357491f9ad6-kube-api-access-hbgqb\") pod \"openstack-operator-index-xg64b\" (UID: \"fabb562b-35a6-4e1d-bdd5-5357491f9ad6\") " pod="openstack-operators/openstack-operator-index-xg64b" Nov 25 09:18:49 crc kubenswrapper[4687]: I1125 09:18:49.483220 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbgqb\" (UniqueName: \"kubernetes.io/projected/fabb562b-35a6-4e1d-bdd5-5357491f9ad6-kube-api-access-hbgqb\") pod \"openstack-operator-index-xg64b\" (UID: \"fabb562b-35a6-4e1d-bdd5-5357491f9ad6\") " pod="openstack-operators/openstack-operator-index-xg64b" Nov 25 09:18:49 crc kubenswrapper[4687]: I1125 09:18:49.500648 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbgqb\" (UniqueName: \"kubernetes.io/projected/fabb562b-35a6-4e1d-bdd5-5357491f9ad6-kube-api-access-hbgqb\") pod \"openstack-operator-index-xg64b\" (UID: \"fabb562b-35a6-4e1d-bdd5-5357491f9ad6\") " pod="openstack-operators/openstack-operator-index-xg64b" Nov 25 09:18:49 crc kubenswrapper[4687]: I1125 09:18:49.579294 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-xg64b" Nov 25 09:18:49 crc kubenswrapper[4687]: I1125 09:18:49.968265 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-xg64b"] Nov 25 09:18:49 crc kubenswrapper[4687]: W1125 09:18:49.973822 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfabb562b_35a6_4e1d_bdd5_5357491f9ad6.slice/crio-adea1bcc396453e07296defe16a0b1c8bf584617cb42625545adb42f08763048 WatchSource:0}: Error finding container adea1bcc396453e07296defe16a0b1c8bf584617cb42625545adb42f08763048: Status 404 returned error can't find the container with id adea1bcc396453e07296defe16a0b1c8bf584617cb42625545adb42f08763048 Nov 25 09:18:50 crc kubenswrapper[4687]: I1125 09:18:50.383547 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-xg64b" event={"ID":"fabb562b-35a6-4e1d-bdd5-5357491f9ad6","Type":"ContainerStarted","Data":"77339d527fa9b8580d6e44e704e5cc03f7261b1552802087cc3a9490b716a754"} Nov 25 09:18:50 crc kubenswrapper[4687]: I1125 09:18:50.383604 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-xg64b" event={"ID":"fabb562b-35a6-4e1d-bdd5-5357491f9ad6","Type":"ContainerStarted","Data":"adea1bcc396453e07296defe16a0b1c8bf584617cb42625545adb42f08763048"} Nov 25 09:18:50 crc kubenswrapper[4687]: I1125 09:18:50.385013 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-86jr7" podUID="46bc03ee-6170-475d-937a-ddc05a06c2b0" containerName="registry-server" containerID="cri-o://ab8859a6624ae5f1c9d017a0f2c3f5d4636d8befc8ef0c8cf5ed97cc6188a186" gracePeriod=2 Nov 25 09:18:50 crc kubenswrapper[4687]: I1125 09:18:50.408381 4687 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-xg64b" podStartSLOduration=1.363191512 podStartE2EDuration="1.40835645s" podCreationTimestamp="2025-11-25 09:18:49 +0000 UTC" firstStartedPulling="2025-11-25 09:18:49.976504513 +0000 UTC m=+925.030144231" lastFinishedPulling="2025-11-25 09:18:50.021669461 +0000 UTC m=+925.075309169" observedRunningTime="2025-11-25 09:18:50.403198708 +0000 UTC m=+925.456838446" watchObservedRunningTime="2025-11-25 09:18:50.40835645 +0000 UTC m=+925.461996178" Nov 25 09:18:50 crc kubenswrapper[4687]: I1125 09:18:50.556625 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-cf84d" Nov 25 09:18:50 crc kubenswrapper[4687]: I1125 09:18:50.567769 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-ddrgp" Nov 25 09:18:50 crc kubenswrapper[4687]: I1125 09:18:50.671915 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-6jtwr" Nov 25 09:18:50 crc kubenswrapper[4687]: I1125 09:18:50.783913 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-86jr7" Nov 25 09:18:50 crc kubenswrapper[4687]: I1125 09:18:50.904010 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6hbm\" (UniqueName: \"kubernetes.io/projected/46bc03ee-6170-475d-937a-ddc05a06c2b0-kube-api-access-f6hbm\") pod \"46bc03ee-6170-475d-937a-ddc05a06c2b0\" (UID: \"46bc03ee-6170-475d-937a-ddc05a06c2b0\") " Nov 25 09:18:50 crc kubenswrapper[4687]: I1125 09:18:50.911339 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46bc03ee-6170-475d-937a-ddc05a06c2b0-kube-api-access-f6hbm" (OuterVolumeSpecName: "kube-api-access-f6hbm") pod "46bc03ee-6170-475d-937a-ddc05a06c2b0" (UID: "46bc03ee-6170-475d-937a-ddc05a06c2b0"). InnerVolumeSpecName "kube-api-access-f6hbm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:18:51 crc kubenswrapper[4687]: I1125 09:18:51.005862 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f6hbm\" (UniqueName: \"kubernetes.io/projected/46bc03ee-6170-475d-937a-ddc05a06c2b0-kube-api-access-f6hbm\") on node \"crc\" DevicePath \"\"" Nov 25 09:18:51 crc kubenswrapper[4687]: I1125 09:18:51.391041 4687 generic.go:334] "Generic (PLEG): container finished" podID="46bc03ee-6170-475d-937a-ddc05a06c2b0" containerID="ab8859a6624ae5f1c9d017a0f2c3f5d4636d8befc8ef0c8cf5ed97cc6188a186" exitCode=0 Nov 25 09:18:51 crc kubenswrapper[4687]: I1125 09:18:51.391132 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-86jr7" Nov 25 09:18:51 crc kubenswrapper[4687]: I1125 09:18:51.391721 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-86jr7" event={"ID":"46bc03ee-6170-475d-937a-ddc05a06c2b0","Type":"ContainerDied","Data":"ab8859a6624ae5f1c9d017a0f2c3f5d4636d8befc8ef0c8cf5ed97cc6188a186"} Nov 25 09:18:51 crc kubenswrapper[4687]: I1125 09:18:51.391744 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-86jr7" event={"ID":"46bc03ee-6170-475d-937a-ddc05a06c2b0","Type":"ContainerDied","Data":"24725343de05650471ccc89fa1a527fd304253253a2b43e92412fd0f807be51e"} Nov 25 09:18:51 crc kubenswrapper[4687]: I1125 09:18:51.391759 4687 scope.go:117] "RemoveContainer" containerID="ab8859a6624ae5f1c9d017a0f2c3f5d4636d8befc8ef0c8cf5ed97cc6188a186" Nov 25 09:18:51 crc kubenswrapper[4687]: I1125 09:18:51.409714 4687 scope.go:117] "RemoveContainer" containerID="ab8859a6624ae5f1c9d017a0f2c3f5d4636d8befc8ef0c8cf5ed97cc6188a186" Nov 25 09:18:51 crc kubenswrapper[4687]: E1125 09:18:51.410235 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab8859a6624ae5f1c9d017a0f2c3f5d4636d8befc8ef0c8cf5ed97cc6188a186\": container with ID starting with ab8859a6624ae5f1c9d017a0f2c3f5d4636d8befc8ef0c8cf5ed97cc6188a186 not found: ID does not exist" containerID="ab8859a6624ae5f1c9d017a0f2c3f5d4636d8befc8ef0c8cf5ed97cc6188a186" Nov 25 09:18:51 crc kubenswrapper[4687]: I1125 09:18:51.410270 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab8859a6624ae5f1c9d017a0f2c3f5d4636d8befc8ef0c8cf5ed97cc6188a186"} err="failed to get container status \"ab8859a6624ae5f1c9d017a0f2c3f5d4636d8befc8ef0c8cf5ed97cc6188a186\": rpc error: code = NotFound desc = could not find container \"ab8859a6624ae5f1c9d017a0f2c3f5d4636d8befc8ef0c8cf5ed97cc6188a186\": container with ID starting with ab8859a6624ae5f1c9d017a0f2c3f5d4636d8befc8ef0c8cf5ed97cc6188a186 not found: ID does not exist" Nov 25 09:18:51 crc kubenswrapper[4687]: I1125 09:18:51.432230 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-86jr7"] Nov 25 09:18:51 crc kubenswrapper[4687]: I1125 09:18:51.436630 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-86jr7"] Nov 25 09:18:51 crc kubenswrapper[4687]: I1125 09:18:51.744066 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46bc03ee-6170-475d-937a-ddc05a06c2b0" path="/var/lib/kubelet/pods/46bc03ee-6170-475d-937a-ddc05a06c2b0/volumes" Nov 25 09:18:59 crc kubenswrapper[4687]: I1125 09:18:59.580313 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-xg64b" Nov 25 09:18:59 crc kubenswrapper[4687]: I1125 09:18:59.581125 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-xg64b" Nov 25 09:18:59 crc kubenswrapper[4687]: I1125 09:18:59.606383 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-xg64b" Nov 25 09:19:00 crc kubenswrapper[4687]: I1125 09:19:00.480290 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-xg64b" Nov 25 09:19:01 crc 
kubenswrapper[4687]: I1125 09:19:01.489711 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb"] Nov 25 09:19:01 crc kubenswrapper[4687]: E1125 09:19:01.490283 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46bc03ee-6170-475d-937a-ddc05a06c2b0" containerName="registry-server" Nov 25 09:19:01 crc kubenswrapper[4687]: I1125 09:19:01.490300 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="46bc03ee-6170-475d-937a-ddc05a06c2b0" containerName="registry-server" Nov 25 09:19:01 crc kubenswrapper[4687]: I1125 09:19:01.490440 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="46bc03ee-6170-475d-937a-ddc05a06c2b0" containerName="registry-server" Nov 25 09:19:01 crc kubenswrapper[4687]: I1125 09:19:01.491361 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb" Nov 25 09:19:01 crc kubenswrapper[4687]: I1125 09:19:01.493086 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-sx4zk" Nov 25 09:19:01 crc kubenswrapper[4687]: I1125 09:19:01.500745 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb"] Nov 25 09:19:01 crc kubenswrapper[4687]: I1125 09:19:01.646651 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hm4r\" (UniqueName: \"kubernetes.io/projected/989d467f-e529-48b1-ac6b-f8509d9ae3f8-kube-api-access-5hm4r\") pod \"4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb\" (UID: \"989d467f-e529-48b1-ac6b-f8509d9ae3f8\") " pod="openstack-operators/4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb" Nov 25 09:19:01 crc kubenswrapper[4687]: I1125 09:19:01.646749 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/989d467f-e529-48b1-ac6b-f8509d9ae3f8-bundle\") pod \"4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb\" (UID: \"989d467f-e529-48b1-ac6b-f8509d9ae3f8\") " pod="openstack-operators/4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb" Nov 25 09:19:01 crc kubenswrapper[4687]: I1125 09:19:01.646776 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/989d467f-e529-48b1-ac6b-f8509d9ae3f8-util\") pod \"4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb\" (UID: \"989d467f-e529-48b1-ac6b-f8509d9ae3f8\") " pod="openstack-operators/4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb" Nov 25 09:19:01 crc kubenswrapper[4687]: I1125 09:19:01.748220 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/989d467f-e529-48b1-ac6b-f8509d9ae3f8-bundle\") pod \"4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb\" (UID: \"989d467f-e529-48b1-ac6b-f8509d9ae3f8\") " pod="openstack-operators/4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb" Nov 25 09:19:01 crc kubenswrapper[4687]: I1125 09:19:01.748307 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/989d467f-e529-48b1-ac6b-f8509d9ae3f8-util\") pod 
\"4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb\" (UID: \"989d467f-e529-48b1-ac6b-f8509d9ae3f8\") " pod="openstack-operators/4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb" Nov 25 09:19:01 crc kubenswrapper[4687]: I1125 09:19:01.748361 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hm4r\" (UniqueName: \"kubernetes.io/projected/989d467f-e529-48b1-ac6b-f8509d9ae3f8-kube-api-access-5hm4r\") pod \"4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb\" (UID: \"989d467f-e529-48b1-ac6b-f8509d9ae3f8\") " pod="openstack-operators/4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb" Nov 25 09:19:01 crc kubenswrapper[4687]: I1125 09:19:01.749061 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/989d467f-e529-48b1-ac6b-f8509d9ae3f8-bundle\") pod \"4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb\" (UID: \"989d467f-e529-48b1-ac6b-f8509d9ae3f8\") " pod="openstack-operators/4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb" Nov 25 09:19:01 crc kubenswrapper[4687]: I1125 09:19:01.749103 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/989d467f-e529-48b1-ac6b-f8509d9ae3f8-util\") pod \"4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb\" (UID: \"989d467f-e529-48b1-ac6b-f8509d9ae3f8\") " pod="openstack-operators/4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb" Nov 25 09:19:01 crc kubenswrapper[4687]: I1125 09:19:01.769913 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hm4r\" (UniqueName: \"kubernetes.io/projected/989d467f-e529-48b1-ac6b-f8509d9ae3f8-kube-api-access-5hm4r\") pod \"4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb\" (UID: \"989d467f-e529-48b1-ac6b-f8509d9ae3f8\") " pod="openstack-operators/4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb" Nov 25 09:19:01 crc kubenswrapper[4687]: I1125 09:19:01.809555 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb" Nov 25 09:19:02 crc kubenswrapper[4687]: I1125 09:19:02.018173 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb"] Nov 25 09:19:02 crc kubenswrapper[4687]: I1125 09:19:02.464790 4687 generic.go:334] "Generic (PLEG): container finished" podID="989d467f-e529-48b1-ac6b-f8509d9ae3f8" containerID="f0876f26eaa5667e4f401602dfd1a720d2f66b1ce1bd4a9903b82ccf4dd2ead5" exitCode=0 Nov 25 09:19:02 crc kubenswrapper[4687]: I1125 09:19:02.464845 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb" event={"ID":"989d467f-e529-48b1-ac6b-f8509d9ae3f8","Type":"ContainerDied","Data":"f0876f26eaa5667e4f401602dfd1a720d2f66b1ce1bd4a9903b82ccf4dd2ead5"} Nov 25 09:19:02 crc kubenswrapper[4687]: I1125 09:19:02.464897 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb" event={"ID":"989d467f-e529-48b1-ac6b-f8509d9ae3f8","Type":"ContainerStarted","Data":"56e55d7ff363ed343496838cb60ac72a2e1c5c7dd5d81335de50ce4b7e75cca1"} Nov 25 09:19:03 crc kubenswrapper[4687]: I1125 09:19:03.474087 4687 generic.go:334] "Generic (PLEG): container finished" podID="989d467f-e529-48b1-ac6b-f8509d9ae3f8" containerID="a08c959fb41021c15b1aa782678f6b8addd16ef35ee698e37a4851d8606aa9b0" exitCode=0 Nov 25 09:19:03 crc kubenswrapper[4687]: I1125 09:19:03.474151 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb" event={"ID":"989d467f-e529-48b1-ac6b-f8509d9ae3f8","Type":"ContainerDied","Data":"a08c959fb41021c15b1aa782678f6b8addd16ef35ee698e37a4851d8606aa9b0"} Nov 25 09:19:04 crc kubenswrapper[4687]: I1125 09:19:04.482184 4687 generic.go:334] "Generic (PLEG): container finished" podID="989d467f-e529-48b1-ac6b-f8509d9ae3f8" containerID="5e709512638c6e8eae2cee0d5beb294445d4227a726b2d4dfdd83a1f417336f9" exitCode=0 Nov 25 09:19:04 crc kubenswrapper[4687]: I1125 09:19:04.482290 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb" event={"ID":"989d467f-e529-48b1-ac6b-f8509d9ae3f8","Type":"ContainerDied","Data":"5e709512638c6e8eae2cee0d5beb294445d4227a726b2d4dfdd83a1f417336f9"} Nov 25 09:19:05 crc kubenswrapper[4687]: I1125 09:19:05.819514 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb" Nov 25 09:19:06 crc kubenswrapper[4687]: I1125 09:19:06.004273 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/989d467f-e529-48b1-ac6b-f8509d9ae3f8-bundle\") pod \"989d467f-e529-48b1-ac6b-f8509d9ae3f8\" (UID: \"989d467f-e529-48b1-ac6b-f8509d9ae3f8\") " Nov 25 09:19:06 crc kubenswrapper[4687]: I1125 09:19:06.004701 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hm4r\" (UniqueName: \"kubernetes.io/projected/989d467f-e529-48b1-ac6b-f8509d9ae3f8-kube-api-access-5hm4r\") pod \"989d467f-e529-48b1-ac6b-f8509d9ae3f8\" (UID: \"989d467f-e529-48b1-ac6b-f8509d9ae3f8\") " Nov 25 09:19:06 crc kubenswrapper[4687]: I1125 09:19:06.004740 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/989d467f-e529-48b1-ac6b-f8509d9ae3f8-util\") pod \"989d467f-e529-48b1-ac6b-f8509d9ae3f8\" (UID: \"989d467f-e529-48b1-ac6b-f8509d9ae3f8\") " Nov 25 09:19:06 crc kubenswrapper[4687]: I1125 09:19:06.006035 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/989d467f-e529-48b1-ac6b-f8509d9ae3f8-bundle" (OuterVolumeSpecName: "bundle") pod "989d467f-e529-48b1-ac6b-f8509d9ae3f8" (UID: "989d467f-e529-48b1-ac6b-f8509d9ae3f8"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:19:06 crc kubenswrapper[4687]: I1125 09:19:06.013880 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/989d467f-e529-48b1-ac6b-f8509d9ae3f8-kube-api-access-5hm4r" (OuterVolumeSpecName: "kube-api-access-5hm4r") pod "989d467f-e529-48b1-ac6b-f8509d9ae3f8" (UID: "989d467f-e529-48b1-ac6b-f8509d9ae3f8"). InnerVolumeSpecName "kube-api-access-5hm4r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:19:06 crc kubenswrapper[4687]: I1125 09:19:06.021041 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/989d467f-e529-48b1-ac6b-f8509d9ae3f8-util" (OuterVolumeSpecName: "util") pod "989d467f-e529-48b1-ac6b-f8509d9ae3f8" (UID: "989d467f-e529-48b1-ac6b-f8509d9ae3f8"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:19:06 crc kubenswrapper[4687]: I1125 09:19:06.106353 4687 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/989d467f-e529-48b1-ac6b-f8509d9ae3f8-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:19:06 crc kubenswrapper[4687]: I1125 09:19:06.106412 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hm4r\" (UniqueName: \"kubernetes.io/projected/989d467f-e529-48b1-ac6b-f8509d9ae3f8-kube-api-access-5hm4r\") on node \"crc\" DevicePath \"\"" Nov 25 09:19:06 crc kubenswrapper[4687]: I1125 09:19:06.106427 4687 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/989d467f-e529-48b1-ac6b-f8509d9ae3f8-util\") on node \"crc\" DevicePath \"\"" Nov 25 09:19:06 crc kubenswrapper[4687]: I1125 09:19:06.498047 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb" event={"ID":"989d467f-e529-48b1-ac6b-f8509d9ae3f8","Type":"ContainerDied","Data":"56e55d7ff363ed343496838cb60ac72a2e1c5c7dd5d81335de50ce4b7e75cca1"} Nov 25 09:19:06 crc kubenswrapper[4687]: I1125 09:19:06.498098 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="56e55d7ff363ed343496838cb60ac72a2e1c5c7dd5d81335de50ce4b7e75cca1" Nov 25 09:19:06 crc kubenswrapper[4687]: I1125 09:19:06.498174 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb" Nov 25 09:19:13 crc kubenswrapper[4687]: I1125 09:19:13.466109 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5888c99dff-lmgz9"] Nov 25 09:19:13 crc kubenswrapper[4687]: E1125 09:19:13.466897 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="989d467f-e529-48b1-ac6b-f8509d9ae3f8" containerName="util" Nov 25 09:19:13 crc kubenswrapper[4687]: I1125 09:19:13.466910 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="989d467f-e529-48b1-ac6b-f8509d9ae3f8" containerName="util" Nov 25 09:19:13 crc kubenswrapper[4687]: E1125 09:19:13.466923 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="989d467f-e529-48b1-ac6b-f8509d9ae3f8" containerName="pull" Nov 25 09:19:13 crc kubenswrapper[4687]: I1125 09:19:13.466928 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="989d467f-e529-48b1-ac6b-f8509d9ae3f8" containerName="pull" Nov 25 09:19:13 crc kubenswrapper[4687]: E1125 09:19:13.466937 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="989d467f-e529-48b1-ac6b-f8509d9ae3f8" containerName="extract" Nov 25 09:19:13 crc kubenswrapper[4687]: I1125 09:19:13.466943 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="989d467f-e529-48b1-ac6b-f8509d9ae3f8" containerName="extract" Nov 25 09:19:13 crc kubenswrapper[4687]: I1125 09:19:13.467043 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="989d467f-e529-48b1-ac6b-f8509d9ae3f8" containerName="extract" Nov 25 09:19:13 crc kubenswrapper[4687]: I1125 09:19:13.467442 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5888c99dff-lmgz9" Nov 25 09:19:13 crc kubenswrapper[4687]: I1125 09:19:13.469979 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-4hndq" Nov 25 09:19:13 crc kubenswrapper[4687]: I1125 09:19:13.551270 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5888c99dff-lmgz9"] Nov 25 09:19:13 crc kubenswrapper[4687]: I1125 09:19:13.608332 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nf4r2\" (UniqueName: \"kubernetes.io/projected/35a98c5c-b3b4-4e95-821d-923a693b67e0-kube-api-access-nf4r2\") pod \"openstack-operator-controller-operator-5888c99dff-lmgz9\" (UID: \"35a98c5c-b3b4-4e95-821d-923a693b67e0\") " pod="openstack-operators/openstack-operator-controller-operator-5888c99dff-lmgz9" Nov 25 09:19:13 crc kubenswrapper[4687]: I1125 09:19:13.709704 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4r2\" (UniqueName: \"kubernetes.io/projected/35a98c5c-b3b4-4e95-821d-923a693b67e0-kube-api-access-nf4r2\") pod \"openstack-operator-controller-operator-5888c99dff-lmgz9\" (UID: \"35a98c5c-b3b4-4e95-821d-923a693b67e0\") " pod="openstack-operators/openstack-operator-controller-operator-5888c99dff-lmgz9" Nov 25 09:19:13 crc kubenswrapper[4687]: I1125 09:19:13.732946 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nf4r2\" (UniqueName: \"kubernetes.io/projected/35a98c5c-b3b4-4e95-821d-923a693b67e0-kube-api-access-nf4r2\") pod \"openstack-operator-controller-operator-5888c99dff-lmgz9\" (UID: \"35a98c5c-b3b4-4e95-821d-923a693b67e0\") " pod="openstack-operators/openstack-operator-controller-operator-5888c99dff-lmgz9" Nov 25 09:19:13 crc kubenswrapper[4687]: I1125 09:19:13.786240 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5888c99dff-lmgz9" Nov 25 09:19:14 crc kubenswrapper[4687]: I1125 09:19:14.202013 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5888c99dff-lmgz9"] Nov 25 09:19:14 crc kubenswrapper[4687]: I1125 09:19:14.544349 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5888c99dff-lmgz9" event={"ID":"35a98c5c-b3b4-4e95-821d-923a693b67e0","Type":"ContainerStarted","Data":"c2feb4c5def7db7d828dcdb7c144f63e0ccc1dab157a6aaf2960c53c171598a3"} Nov 25 09:19:19 crc kubenswrapper[4687]: I1125 09:19:19.586927 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5888c99dff-lmgz9" event={"ID":"35a98c5c-b3b4-4e95-821d-923a693b67e0","Type":"ContainerStarted","Data":"e14dd63691d70e028a09441346c714390515f8fe70d2d574d11160808239c2d5"} Nov 25 09:19:19 crc kubenswrapper[4687]: I1125 09:19:19.588451 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-5888c99dff-lmgz9" Nov 25 09:19:19 crc kubenswrapper[4687]: I1125 09:19:19.618047 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-5888c99dff-lmgz9" podStartSLOduration=2.184338234 podStartE2EDuration="6.618027809s" podCreationTimestamp="2025-11-25 09:19:13 +0000 UTC" firstStartedPulling="2025-11-25 09:19:14.209087447 +0000 UTC m=+949.262727165" lastFinishedPulling="2025-11-25 09:19:18.642777022 +0000 UTC m=+953.696416740" observedRunningTime="2025-11-25 09:19:19.613848135 +0000 UTC m=+954.667487863" watchObservedRunningTime="2025-11-25 09:19:19.618027809 +0000 UTC m=+954.671667527" Nov 25 09:19:23 crc kubenswrapper[4687]: I1125 09:19:23.788975 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-5888c99dff-lmgz9" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.165662 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-4dn9x"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.167222 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-4dn9x" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.176653 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-5hp7d" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.177972 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-ckt6f"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.179396 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-ckt6f" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.180611 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-kqgdn" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.185658 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-4dn9x"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.188853 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzsmz\" (UniqueName: \"kubernetes.io/projected/a5f5d45b-b0ce-48f8-892e-02571e1f9f24-kube-api-access-mzsmz\") pod \"barbican-operator-controller-manager-86dc4d89c8-4dn9x\" (UID: \"a5f5d45b-b0ce-48f8-892e-02571e1f9f24\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-4dn9x" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.188917 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6k4nc\" (UniqueName: \"kubernetes.io/projected/26dc2622-a74f-405c-9bbb-291adb145908-kube-api-access-6k4nc\") pod \"cinder-operator-controller-manager-79856dc55c-ckt6f\" (UID: \"26dc2622-a74f-405c-9bbb-291adb145908\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-ckt6f" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.201497 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-ckt6f"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.208577 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-jcrff"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.209557 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-jcrff" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.212312 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-5bdnv" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.232568 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-wxt5h"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.233631 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-wxt5h" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.238003 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-52pqw" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.267031 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-zwv7l"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.268431 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zwv7l" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.271334 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-nmq6r" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.275099 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-jcrff"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.282645 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-wxt5h"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.292021 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzsmz\" (UniqueName: \"kubernetes.io/projected/a5f5d45b-b0ce-48f8-892e-02571e1f9f24-kube-api-access-mzsmz\") pod \"barbican-operator-controller-manager-86dc4d89c8-4dn9x\" (UID: \"a5f5d45b-b0ce-48f8-892e-02571e1f9f24\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-4dn9x" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.292093 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4sdmv\" (UniqueName: \"kubernetes.io/projected/ed7a2e30-6110-4b1c-864f-4856c4c0ec8a-kube-api-access-4sdmv\") pod \"glance-operator-controller-manager-68b95954c9-wxt5h\" (UID: \"ed7a2e30-6110-4b1c-864f-4856c4c0ec8a\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-wxt5h" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.292129 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6k4nc\" (UniqueName: \"kubernetes.io/projected/26dc2622-a74f-405c-9bbb-291adb145908-kube-api-access-6k4nc\") pod \"cinder-operator-controller-manager-79856dc55c-ckt6f\" (UID: \"26dc2622-a74f-405c-9bbb-291adb145908\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-ckt6f" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.292192 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxk4p\" (UniqueName: \"kubernetes.io/projected/9c7f6da9-8178-4c3f-b565-9f6eca26c6c7-kube-api-access-cxk4p\") pod \"heat-operator-controller-manager-774b86978c-zwv7l\" (UID: \"9c7f6da9-8178-4c3f-b565-9f6eca26c6c7\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-zwv7l" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.292226 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jt95j\" (UniqueName: \"kubernetes.io/projected/4d75764d-49d9-4482-98a9-728dd977f2bd-kube-api-access-jt95j\") pod \"designate-operator-controller-manager-7d695c9b56-jcrff\" (UID: \"4d75764d-49d9-4482-98a9-728dd977f2bd\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-jcrff" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.305920 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-zwv7l"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.328269 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-lsr97"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.350907 4687 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-6k4nc\" (UniqueName: \"kubernetes.io/projected/26dc2622-a74f-405c-9bbb-291adb145908-kube-api-access-6k4nc\") pod \"cinder-operator-controller-manager-79856dc55c-ckt6f\" (UID: \"26dc2622-a74f-405c-9bbb-291adb145908\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-ckt6f" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.370709 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzsmz\" (UniqueName: \"kubernetes.io/projected/a5f5d45b-b0ce-48f8-892e-02571e1f9f24-kube-api-access-mzsmz\") pod \"barbican-operator-controller-manager-86dc4d89c8-4dn9x\" (UID: \"a5f5d45b-b0ce-48f8-892e-02571e1f9f24\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-4dn9x" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.384252 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-lsr97" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.386805 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-bkbn9" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.395477 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxk4p\" (UniqueName: \"kubernetes.io/projected/9c7f6da9-8178-4c3f-b565-9f6eca26c6c7-kube-api-access-cxk4p\") pod \"heat-operator-controller-manager-774b86978c-zwv7l\" (UID: \"9c7f6da9-8178-4c3f-b565-9f6eca26c6c7\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-zwv7l" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.395547 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jt95j\" (UniqueName: \"kubernetes.io/projected/4d75764d-49d9-4482-98a9-728dd977f2bd-kube-api-access-jt95j\") pod \"designate-operator-controller-manager-7d695c9b56-jcrff\" (UID: \"4d75764d-49d9-4482-98a9-728dd977f2bd\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-jcrff" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.395604 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdjv5\" (UniqueName: \"kubernetes.io/projected/956b1e07-e4b1-44cf-9990-ae928a3e11c7-kube-api-access-rdjv5\") pod \"horizon-operator-controller-manager-68c9694994-lsr97\" (UID: \"956b1e07-e4b1-44cf-9990-ae928a3e11c7\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-lsr97" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.395648 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4sdmv\" (UniqueName: \"kubernetes.io/projected/ed7a2e30-6110-4b1c-864f-4856c4c0ec8a-kube-api-access-4sdmv\") pod \"glance-operator-controller-manager-68b95954c9-wxt5h\" (UID: \"ed7a2e30-6110-4b1c-864f-4856c4c0ec8a\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-wxt5h" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.465910 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.465991 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4sdmv\" (UniqueName: \"kubernetes.io/projected/ed7a2e30-6110-4b1c-864f-4856c4c0ec8a-kube-api-access-4sdmv\") pod 
\"glance-operator-controller-manager-68b95954c9-wxt5h\" (UID: \"ed7a2e30-6110-4b1c-864f-4856c4c0ec8a\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-wxt5h" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.467060 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.473080 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-5gs8n" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.485439 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.486528 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-lsr97"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.492966 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jt95j\" (UniqueName: \"kubernetes.io/projected/4d75764d-49d9-4482-98a9-728dd977f2bd-kube-api-access-jt95j\") pod \"designate-operator-controller-manager-7d695c9b56-jcrff\" (UID: \"4d75764d-49d9-4482-98a9-728dd977f2bd\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-jcrff" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.497078 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxk4p\" (UniqueName: \"kubernetes.io/projected/9c7f6da9-8178-4c3f-b565-9f6eca26c6c7-kube-api-access-cxk4p\") pod \"heat-operator-controller-manager-774b86978c-zwv7l\" (UID: \"9c7f6da9-8178-4c3f-b565-9f6eca26c6c7\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-zwv7l" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.497460 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrd4d\" (UniqueName: \"kubernetes.io/projected/0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14-kube-api-access-qrd4d\") pod \"infra-operator-controller-manager-d5cc86f4b-w2vc9\" (UID: \"0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.497544 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-w2vc9\" (UID: \"0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.497611 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdjv5\" (UniqueName: \"kubernetes.io/projected/956b1e07-e4b1-44cf-9990-ae928a3e11c7-kube-api-access-rdjv5\") pod \"horizon-operator-controller-manager-68c9694994-lsr97\" (UID: \"956b1e07-e4b1-44cf-9990-ae928a3e11c7\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-lsr97" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.499780 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-4dn9x" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.509646 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.514624 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-ckt6f" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.537087 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-jcrff" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.541382 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdjv5\" (UniqueName: \"kubernetes.io/projected/956b1e07-e4b1-44cf-9990-ae928a3e11c7-kube-api-access-rdjv5\") pod \"horizon-operator-controller-manager-68c9694994-lsr97\" (UID: \"956b1e07-e4b1-44cf-9990-ae928a3e11c7\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-lsr97" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.552803 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-wxt5h" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.560017 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7r5w5"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.561192 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7r5w5" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.565374 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-vq9rb" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.583562 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-khvhw"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.584718 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-khvhw" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.589000 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-h4rxt" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.593358 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zwv7l" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.608094 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cccs6\" (UniqueName: \"kubernetes.io/projected/dc4b5a7c-5e58-42a7-b1ee-676268f99e21-kube-api-access-cccs6\") pod \"keystone-operator-controller-manager-748dc6576f-khvhw\" (UID: \"dc4b5a7c-5e58-42a7-b1ee-676268f99e21\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-khvhw" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.608202 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrd4d\" (UniqueName: \"kubernetes.io/projected/0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14-kube-api-access-qrd4d\") pod \"infra-operator-controller-manager-d5cc86f4b-w2vc9\" (UID: \"0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.608222 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-cf25t"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.608232 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-w2vc9\" (UID: \"0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.608289 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9nvp8\" (UniqueName: \"kubernetes.io/projected/7c015be6-1e7f-404b-9ea0-31cbec410081-kube-api-access-9nvp8\") pod \"ironic-operator-controller-manager-5bfcdc958c-7r5w5\" (UID: \"7c015be6-1e7f-404b-9ea0-31cbec410081\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7r5w5" Nov 25 09:19:40 crc kubenswrapper[4687]: E1125 09:19:40.608536 4687 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 09:19:40 crc kubenswrapper[4687]: E1125 09:19:40.608583 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14-cert podName:0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14 nodeName:}" failed. No retries permitted until 2025-11-25 09:19:41.108566887 +0000 UTC m=+976.162206605 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14-cert") pod "infra-operator-controller-manager-d5cc86f4b-w2vc9" (UID: "0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14") : secret "infra-operator-webhook-server-cert" not found Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.609472 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-cf25t" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.629800 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-vqgqd" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.633709 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-khvhw"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.651766 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-cf25t"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.670896 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7r5w5"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.699768 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qfcjj"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.700792 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qfcjj" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.704115 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-qj6nv" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.707964 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrd4d\" (UniqueName: \"kubernetes.io/projected/0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14-kube-api-access-qrd4d\") pod \"infra-operator-controller-manager-d5cc86f4b-w2vc9\" (UID: \"0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.709722 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9nvp8\" (UniqueName: \"kubernetes.io/projected/7c015be6-1e7f-404b-9ea0-31cbec410081-kube-api-access-9nvp8\") pod \"ironic-operator-controller-manager-5bfcdc958c-7r5w5\" (UID: \"7c015be6-1e7f-404b-9ea0-31cbec410081\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7r5w5" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.709758 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cccs6\" (UniqueName: \"kubernetes.io/projected/dc4b5a7c-5e58-42a7-b1ee-676268f99e21-kube-api-access-cccs6\") pod \"keystone-operator-controller-manager-748dc6576f-khvhw\" (UID: \"dc4b5a7c-5e58-42a7-b1ee-676268f99e21\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-khvhw" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.709794 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cqzk\" (UniqueName: \"kubernetes.io/projected/b07b54a0-d4b4-49e3-bd03-810eeefa6fa7-kube-api-access-7cqzk\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-qfcjj\" (UID: \"b07b54a0-d4b4-49e3-bd03-810eeefa6fa7\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qfcjj" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.709836 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4w69k\" 
(UniqueName: \"kubernetes.io/projected/469145fd-b998-4c0a-b356-508c4940f78b-kube-api-access-4w69k\") pod \"manila-operator-controller-manager-58bb8d67cc-cf25t\" (UID: \"469145fd-b998-4c0a-b356-508c4940f78b\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-cf25t" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.716904 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-lsr97" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.728908 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-qzdnw"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.730094 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-qzdnw"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.730177 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-qzdnw" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.736548 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9nvp8\" (UniqueName: \"kubernetes.io/projected/7c015be6-1e7f-404b-9ea0-31cbec410081-kube-api-access-9nvp8\") pod \"ironic-operator-controller-manager-5bfcdc958c-7r5w5\" (UID: \"7c015be6-1e7f-404b-9ea0-31cbec410081\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7r5w5" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.738753 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-99k8n"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.757263 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cccs6\" (UniqueName: \"kubernetes.io/projected/dc4b5a7c-5e58-42a7-b1ee-676268f99e21-kube-api-access-cccs6\") pod \"keystone-operator-controller-manager-748dc6576f-khvhw\" (UID: \"dc4b5a7c-5e58-42a7-b1ee-676268f99e21\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-khvhw" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.764929 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qfcjj"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.765055 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-99k8n" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.770823 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-z59ft"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.771795 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-z59ft" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.776764 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-b9zbw" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.782740 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-99k8n"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.786683 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-2h2nb" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.786875 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-zhwpg" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.792769 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-z59ft"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.811823 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cqzk\" (UniqueName: \"kubernetes.io/projected/b07b54a0-d4b4-49e3-bd03-810eeefa6fa7-kube-api-access-7cqzk\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-qfcjj\" (UID: \"b07b54a0-d4b4-49e3-bd03-810eeefa6fa7\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qfcjj" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.811865 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4w69k\" (UniqueName: \"kubernetes.io/projected/469145fd-b998-4c0a-b356-508c4940f78b-kube-api-access-4w69k\") pod \"manila-operator-controller-manager-58bb8d67cc-cf25t\" (UID: \"469145fd-b998-4c0a-b356-508c4940f78b\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-cf25t" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.811935 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cf7pc\" (UniqueName: \"kubernetes.io/projected/555e5cf5-f2f8-46f2-ab17-8589c7391fc8-kube-api-access-cf7pc\") pod \"neutron-operator-controller-manager-7c57c8bbc4-qzdnw\" (UID: \"555e5cf5-f2f8-46f2-ab17-8589c7391fc8\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-qzdnw" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.811959 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2n6m\" (UniqueName: \"kubernetes.io/projected/78edbfb4-5838-4c2d-a4e3-e1512bb55654-kube-api-access-f2n6m\") pod \"octavia-operator-controller-manager-fd75fd47d-z59ft\" (UID: \"78edbfb4-5838-4c2d-a4e3-e1512bb55654\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-z59ft" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.811990 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7q7fj\" (UniqueName: \"kubernetes.io/projected/62a14c39-245f-4c8d-84b5-b23d023d810f-kube-api-access-7q7fj\") pod \"nova-operator-controller-manager-79556f57fc-99k8n\" (UID: \"62a14c39-245f-4c8d-84b5-b23d023d810f\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-99k8n" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.818595 4687 kubelet.go:2421] "SyncLoop 
ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-c8plr"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.823822 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-c8plr" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.837210 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-xgm78" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.844836 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cqzk\" (UniqueName: \"kubernetes.io/projected/b07b54a0-d4b4-49e3-bd03-810eeefa6fa7-kube-api-access-7cqzk\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-qfcjj\" (UID: \"b07b54a0-d4b4-49e3-bd03-810eeefa6fa7\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qfcjj" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.858098 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4w69k\" (UniqueName: \"kubernetes.io/projected/469145fd-b998-4c0a-b356-508c4940f78b-kube-api-access-4w69k\") pod \"manila-operator-controller-manager-58bb8d67cc-cf25t\" (UID: \"469145fd-b998-4c0a-b356-508c4940f78b\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-cf25t" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.870731 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-qsrlz"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.871729 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-qsrlz" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.877886 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-9qwcx" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.890789 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.892183 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7r5w5" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.892332 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.902861 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-c8plr"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.909224 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-f6rbz" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.909363 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.915196 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cf7pc\" (UniqueName: \"kubernetes.io/projected/555e5cf5-f2f8-46f2-ab17-8589c7391fc8-kube-api-access-cf7pc\") pod \"neutron-operator-controller-manager-7c57c8bbc4-qzdnw\" (UID: \"555e5cf5-f2f8-46f2-ab17-8589c7391fc8\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-qzdnw" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.915234 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5\" (UID: \"9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.915259 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2n6m\" (UniqueName: \"kubernetes.io/projected/78edbfb4-5838-4c2d-a4e3-e1512bb55654-kube-api-access-f2n6m\") pod \"octavia-operator-controller-manager-fd75fd47d-z59ft\" (UID: \"78edbfb4-5838-4c2d-a4e3-e1512bb55654\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-z59ft" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.915296 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7q7fj\" (UniqueName: \"kubernetes.io/projected/62a14c39-245f-4c8d-84b5-b23d023d810f-kube-api-access-7q7fj\") pod \"nova-operator-controller-manager-79556f57fc-99k8n\" (UID: \"62a14c39-245f-4c8d-84b5-b23d023d810f\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-99k8n" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.915329 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pfl7\" (UniqueName: \"kubernetes.io/projected/9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be-kube-api-access-6pfl7\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5\" (UID: \"9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.915357 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hfz9\" (UniqueName: \"kubernetes.io/projected/be89cce1-89d8-47da-b777-f7805762b230-kube-api-access-5hfz9\") pod \"ovn-operator-controller-manager-66cf5c67ff-c8plr\" (UID: \"be89cce1-89d8-47da-b777-f7805762b230\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-c8plr" Nov 25 09:19:40 
crc kubenswrapper[4687]: I1125 09:19:40.915397 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8nr94\" (UniqueName: \"kubernetes.io/projected/99367bef-5882-4884-8fe5-9a3ff8edd1cb-kube-api-access-8nr94\") pod \"placement-operator-controller-manager-5db546f9d9-qsrlz\" (UID: \"99367bef-5882-4884-8fe5-9a3ff8edd1cb\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-qsrlz" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.934350 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-khvhw" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.944349 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f2n6m\" (UniqueName: \"kubernetes.io/projected/78edbfb4-5838-4c2d-a4e3-e1512bb55654-kube-api-access-f2n6m\") pod \"octavia-operator-controller-manager-fd75fd47d-z59ft\" (UID: \"78edbfb4-5838-4c2d-a4e3-e1512bb55654\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-z59ft" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.963108 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7q7fj\" (UniqueName: \"kubernetes.io/projected/62a14c39-245f-4c8d-84b5-b23d023d810f-kube-api-access-7q7fj\") pod \"nova-operator-controller-manager-79556f57fc-99k8n\" (UID: \"62a14c39-245f-4c8d-84b5-b23d023d810f\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-99k8n" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.963545 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cf7pc\" (UniqueName: \"kubernetes.io/projected/555e5cf5-f2f8-46f2-ab17-8589c7391fc8-kube-api-access-cf7pc\") pod \"neutron-operator-controller-manager-7c57c8bbc4-qzdnw\" (UID: \"555e5cf5-f2f8-46f2-ab17-8589c7391fc8\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-qzdnw" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.970283 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-qsrlz"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.970437 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-cf25t" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.983589 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-567f98c9d-s9vtg"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.984913 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-s9vtg" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.987865 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-kqczx" Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.995752 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-pqd76"] Nov 25 09:19:40 crc kubenswrapper[4687]: I1125 09:19:40.996962 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-pqd76" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.001290 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-twnm9" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.048466 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5"] Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.050991 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8nr94\" (UniqueName: \"kubernetes.io/projected/99367bef-5882-4884-8fe5-9a3ff8edd1cb-kube-api-access-8nr94\") pod \"placement-operator-controller-manager-5db546f9d9-qsrlz\" (UID: \"99367bef-5882-4884-8fe5-9a3ff8edd1cb\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-qsrlz" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.051074 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5\" (UID: \"9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.051163 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pfl7\" (UniqueName: \"kubernetes.io/projected/9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be-kube-api-access-6pfl7\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5\" (UID: \"9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.051199 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hfz9\" (UniqueName: \"kubernetes.io/projected/be89cce1-89d8-47da-b777-f7805762b230-kube-api-access-5hfz9\") pod \"ovn-operator-controller-manager-66cf5c67ff-c8plr\" (UID: \"be89cce1-89d8-47da-b777-f7805762b230\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-c8plr" Nov 25 09:19:41 crc kubenswrapper[4687]: E1125 09:19:41.051801 4687 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 09:19:41 crc kubenswrapper[4687]: E1125 09:19:41.051857 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be-cert podName:9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be nodeName:}" failed. No retries permitted until 2025-11-25 09:19:41.551831772 +0000 UTC m=+976.605471490 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be-cert") pod "openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5" (UID: "9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.080602 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qfcjj" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.093666 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hfz9\" (UniqueName: \"kubernetes.io/projected/be89cce1-89d8-47da-b777-f7805762b230-kube-api-access-5hfz9\") pod \"ovn-operator-controller-manager-66cf5c67ff-c8plr\" (UID: \"be89cce1-89d8-47da-b777-f7805762b230\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-c8plr" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.093704 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6pfl7\" (UniqueName: \"kubernetes.io/projected/9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be-kube-api-access-6pfl7\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5\" (UID: \"9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.093832 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-567f98c9d-s9vtg"] Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.107206 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8nr94\" (UniqueName: \"kubernetes.io/projected/99367bef-5882-4884-8fe5-9a3ff8edd1cb-kube-api-access-8nr94\") pod \"placement-operator-controller-manager-5db546f9d9-qsrlz\" (UID: \"99367bef-5882-4884-8fe5-9a3ff8edd1cb\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-qsrlz" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.140217 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-pqd76"] Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.143937 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-qzdnw" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.162437 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7t7xw\" (UniqueName: \"kubernetes.io/projected/ac5487a2-ce65-4034-973b-b939494aef63-kube-api-access-7t7xw\") pod \"swift-operator-controller-manager-6fdc4fcf86-pqd76\" (UID: \"ac5487a2-ce65-4034-973b-b939494aef63\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-pqd76" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.162551 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-w2vc9\" (UID: \"0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.162580 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqzml\" (UniqueName: \"kubernetes.io/projected/4a1d4849-2906-4fd5-b54e-7f2e567f05ef-kube-api-access-sqzml\") pod \"telemetry-operator-controller-manager-567f98c9d-s9vtg\" (UID: \"4a1d4849-2906-4fd5-b54e-7f2e567f05ef\") " pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-s9vtg" Nov 25 09:19:41 crc kubenswrapper[4687]: E1125 09:19:41.162863 4687 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 09:19:41 crc kubenswrapper[4687]: E1125 09:19:41.162928 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14-cert podName:0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14 nodeName:}" failed. No retries permitted until 2025-11-25 09:19:42.16290525 +0000 UTC m=+977.216544968 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14-cert") pod "infra-operator-controller-manager-d5cc86f4b-w2vc9" (UID: "0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14") : secret "infra-operator-webhook-server-cert" not found Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.163212 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-99k8n" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.171914 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-7jmgj"] Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.178137 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cb74df96-7jmgj" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.188207 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-tn2ws" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.192258 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-7jmgj"] Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.210613 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-ll9p9"] Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.210893 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-z59ft" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.211797 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-864885998-ll9p9" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.217752 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-trrpx" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.218048 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-ll9p9"] Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.242443 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-c8plr" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.264236 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7t7xw\" (UniqueName: \"kubernetes.io/projected/ac5487a2-ce65-4034-973b-b939494aef63-kube-api-access-7t7xw\") pod \"swift-operator-controller-manager-6fdc4fcf86-pqd76\" (UID: \"ac5487a2-ce65-4034-973b-b939494aef63\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-pqd76" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.264306 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqzml\" (UniqueName: \"kubernetes.io/projected/4a1d4849-2906-4fd5-b54e-7f2e567f05ef-kube-api-access-sqzml\") pod \"telemetry-operator-controller-manager-567f98c9d-s9vtg\" (UID: \"4a1d4849-2906-4fd5-b54e-7f2e567f05ef\") " pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-s9vtg" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.270618 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-qsrlz" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.293442 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm"] Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.294792 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.300975 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.301105 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-pql6m" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.301449 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.308029 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7t7xw\" (UniqueName: \"kubernetes.io/projected/ac5487a2-ce65-4034-973b-b939494aef63-kube-api-access-7t7xw\") pod \"swift-operator-controller-manager-6fdc4fcf86-pqd76\" (UID: \"ac5487a2-ce65-4034-973b-b939494aef63\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-pqd76" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.313914 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm"] Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.347221 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqzml\" (UniqueName: \"kubernetes.io/projected/4a1d4849-2906-4fd5-b54e-7f2e567f05ef-kube-api-access-sqzml\") pod \"telemetry-operator-controller-manager-567f98c9d-s9vtg\" (UID: \"4a1d4849-2906-4fd5-b54e-7f2e567f05ef\") " pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-s9vtg" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.365390 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6fc8\" (UniqueName: \"kubernetes.io/projected/c2ea5569-33b4-403d-9303-770ec432f4cc-kube-api-access-q6fc8\") pod \"watcher-operator-controller-manager-864885998-ll9p9\" (UID: \"c2ea5569-33b4-403d-9303-770ec432f4cc\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-ll9p9" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.365618 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kcrsc\" (UniqueName: \"kubernetes.io/projected/9fac1200-5b4d-4032-98aa-d293d13fdcc7-kube-api-access-kcrsc\") pod \"test-operator-controller-manager-5cb74df96-7jmgj\" (UID: \"9fac1200-5b4d-4032-98aa-d293d13fdcc7\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-7jmgj" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.397230 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6lbv2"] Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.398228 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6lbv2" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.399122 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-pqd76" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.399781 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-z6bjv" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.408833 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6lbv2"] Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.410287 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-s9vtg" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.432639 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-4dn9x"] Nov 25 09:19:41 crc kubenswrapper[4687]: W1125 09:19:41.448100 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda5f5d45b_b0ce_48f8_892e_02571e1f9f24.slice/crio-40ca96a58bd4975f6ffcf61c1b8de0fef80167deadfcc3f8843751f22e6c4b72 WatchSource:0}: Error finding container 40ca96a58bd4975f6ffcf61c1b8de0fef80167deadfcc3f8843751f22e6c4b72: Status 404 returned error can't find the container with id 40ca96a58bd4975f6ffcf61c1b8de0fef80167deadfcc3f8843751f22e6c4b72 Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.450143 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-ckt6f"] Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.466357 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kcrsc\" (UniqueName: \"kubernetes.io/projected/9fac1200-5b4d-4032-98aa-d293d13fdcc7-kube-api-access-kcrsc\") pod \"test-operator-controller-manager-5cb74df96-7jmgj\" (UID: \"9fac1200-5b4d-4032-98aa-d293d13fdcc7\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-7jmgj" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.466410 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpww2\" (UniqueName: \"kubernetes.io/projected/72c3a2af-6e0e-4862-b638-2694a71f1e5a-kube-api-access-wpww2\") pod \"openstack-operator-controller-manager-d664976d5-hdtkm\" (UID: \"72c3a2af-6e0e-4862-b638-2694a71f1e5a\") " pod="openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.466438 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/72c3a2af-6e0e-4862-b638-2694a71f1e5a-metrics-certs\") pod \"openstack-operator-controller-manager-d664976d5-hdtkm\" (UID: \"72c3a2af-6e0e-4862-b638-2694a71f1e5a\") " pod="openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.466521 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6fc8\" (UniqueName: \"kubernetes.io/projected/c2ea5569-33b4-403d-9303-770ec432f4cc-kube-api-access-q6fc8\") pod \"watcher-operator-controller-manager-864885998-ll9p9\" (UID: \"c2ea5569-33b4-403d-9303-770ec432f4cc\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-ll9p9" Nov 25 09:19:41 crc 
kubenswrapper[4687]: I1125 09:19:41.466553 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/72c3a2af-6e0e-4862-b638-2694a71f1e5a-webhook-certs\") pod \"openstack-operator-controller-manager-d664976d5-hdtkm\" (UID: \"72c3a2af-6e0e-4862-b638-2694a71f1e5a\") " pod="openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.504463 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kcrsc\" (UniqueName: \"kubernetes.io/projected/9fac1200-5b4d-4032-98aa-d293d13fdcc7-kube-api-access-kcrsc\") pod \"test-operator-controller-manager-5cb74df96-7jmgj\" (UID: \"9fac1200-5b4d-4032-98aa-d293d13fdcc7\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-7jmgj" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.515516 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q6fc8\" (UniqueName: \"kubernetes.io/projected/c2ea5569-33b4-403d-9303-770ec432f4cc-kube-api-access-q6fc8\") pod \"watcher-operator-controller-manager-864885998-ll9p9\" (UID: \"c2ea5569-33b4-403d-9303-770ec432f4cc\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-ll9p9" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.574979 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-864885998-ll9p9" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.578399 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pf5rj\" (UniqueName: \"kubernetes.io/projected/5a4bd509-a298-4fff-845b-262a41634134-kube-api-access-pf5rj\") pod \"rabbitmq-cluster-operator-manager-668c99d594-6lbv2\" (UID: \"5a4bd509-a298-4fff-845b-262a41634134\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6lbv2" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.578434 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/72c3a2af-6e0e-4862-b638-2694a71f1e5a-webhook-certs\") pod \"openstack-operator-controller-manager-d664976d5-hdtkm\" (UID: \"72c3a2af-6e0e-4862-b638-2694a71f1e5a\") " pod="openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.578487 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5\" (UID: \"9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.578538 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpww2\" (UniqueName: \"kubernetes.io/projected/72c3a2af-6e0e-4862-b638-2694a71f1e5a-kube-api-access-wpww2\") pod \"openstack-operator-controller-manager-d664976d5-hdtkm\" (UID: \"72c3a2af-6e0e-4862-b638-2694a71f1e5a\") " pod="openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.578555 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" 
(UniqueName: \"kubernetes.io/secret/72c3a2af-6e0e-4862-b638-2694a71f1e5a-metrics-certs\") pod \"openstack-operator-controller-manager-d664976d5-hdtkm\" (UID: \"72c3a2af-6e0e-4862-b638-2694a71f1e5a\") " pod="openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm" Nov 25 09:19:41 crc kubenswrapper[4687]: E1125 09:19:41.578685 4687 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 09:19:41 crc kubenswrapper[4687]: E1125 09:19:41.578729 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/72c3a2af-6e0e-4862-b638-2694a71f1e5a-metrics-certs podName:72c3a2af-6e0e-4862-b638-2694a71f1e5a nodeName:}" failed. No retries permitted until 2025-11-25 09:19:42.078714973 +0000 UTC m=+977.132354691 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/72c3a2af-6e0e-4862-b638-2694a71f1e5a-metrics-certs") pod "openstack-operator-controller-manager-d664976d5-hdtkm" (UID: "72c3a2af-6e0e-4862-b638-2694a71f1e5a") : secret "metrics-server-cert" not found Nov 25 09:19:41 crc kubenswrapper[4687]: E1125 09:19:41.578969 4687 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 09:19:41 crc kubenswrapper[4687]: E1125 09:19:41.578992 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be-cert podName:9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be nodeName:}" failed. No retries permitted until 2025-11-25 09:19:42.578984951 +0000 UTC m=+977.632624669 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be-cert") pod "openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5" (UID: "9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 09:19:41 crc kubenswrapper[4687]: E1125 09:19:41.579041 4687 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 09:19:41 crc kubenswrapper[4687]: E1125 09:19:41.579109 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/72c3a2af-6e0e-4862-b638-2694a71f1e5a-webhook-certs podName:72c3a2af-6e0e-4862-b638-2694a71f1e5a nodeName:}" failed. No retries permitted until 2025-11-25 09:19:42.079085644 +0000 UTC m=+977.132725462 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/72c3a2af-6e0e-4862-b638-2694a71f1e5a-webhook-certs") pod "openstack-operator-controller-manager-d664976d5-hdtkm" (UID: "72c3a2af-6e0e-4862-b638-2694a71f1e5a") : secret "webhook-server-cert" not found Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.608731 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cb74df96-7jmgj" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.626313 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpww2\" (UniqueName: \"kubernetes.io/projected/72c3a2af-6e0e-4862-b638-2694a71f1e5a-kube-api-access-wpww2\") pod \"openstack-operator-controller-manager-d664976d5-hdtkm\" (UID: \"72c3a2af-6e0e-4862-b638-2694a71f1e5a\") " pod="openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.682222 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pf5rj\" (UniqueName: \"kubernetes.io/projected/5a4bd509-a298-4fff-845b-262a41634134-kube-api-access-pf5rj\") pod \"rabbitmq-cluster-operator-manager-668c99d594-6lbv2\" (UID: \"5a4bd509-a298-4fff-845b-262a41634134\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6lbv2" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.714358 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pf5rj\" (UniqueName: \"kubernetes.io/projected/5a4bd509-a298-4fff-845b-262a41634134-kube-api-access-pf5rj\") pod \"rabbitmq-cluster-operator-manager-668c99d594-6lbv2\" (UID: \"5a4bd509-a298-4fff-845b-262a41634134\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6lbv2" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.722929 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-jcrff"] Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.754323 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-ckt6f" event={"ID":"26dc2622-a74f-405c-9bbb-291adb145908","Type":"ContainerStarted","Data":"294ec35ff3016e5449b4a60d6396c02cdaf6d537152475a5a0163e85d4d79a78"} Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.760178 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-4dn9x" event={"ID":"a5f5d45b-b0ce-48f8-892e-02571e1f9f24","Type":"ContainerStarted","Data":"40ca96a58bd4975f6ffcf61c1b8de0fef80167deadfcc3f8843751f22e6c4b72"} Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.893989 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-lsr97"] Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.900851 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-cf25t"] Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.927606 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-zwv7l"] Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.928882 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6lbv2" Nov 25 09:19:41 crc kubenswrapper[4687]: I1125 09:19:41.946746 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-wxt5h"] Nov 25 09:19:41 crc kubenswrapper[4687]: W1125 09:19:41.991213 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9c7f6da9_8178_4c3f_b565_9f6eca26c6c7.slice/crio-64447e9d7264c7e2af6b2a338c02a9d7e86cb5182b794a16d7b619f8d887052b WatchSource:0}: Error finding container 64447e9d7264c7e2af6b2a338c02a9d7e86cb5182b794a16d7b619f8d887052b: Status 404 returned error can't find the container with id 64447e9d7264c7e2af6b2a338c02a9d7e86cb5182b794a16d7b619f8d887052b Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.091397 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-khvhw"] Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.093198 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/72c3a2af-6e0e-4862-b638-2694a71f1e5a-metrics-certs\") pod \"openstack-operator-controller-manager-d664976d5-hdtkm\" (UID: \"72c3a2af-6e0e-4862-b638-2694a71f1e5a\") " pod="openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm" Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.093290 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/72c3a2af-6e0e-4862-b638-2694a71f1e5a-webhook-certs\") pod \"openstack-operator-controller-manager-d664976d5-hdtkm\" (UID: \"72c3a2af-6e0e-4862-b638-2694a71f1e5a\") " pod="openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm" Nov 25 09:19:42 crc kubenswrapper[4687]: E1125 09:19:42.093528 4687 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 09:19:42 crc kubenswrapper[4687]: E1125 09:19:42.093601 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/72c3a2af-6e0e-4862-b638-2694a71f1e5a-webhook-certs podName:72c3a2af-6e0e-4862-b638-2694a71f1e5a nodeName:}" failed. No retries permitted until 2025-11-25 09:19:43.093575376 +0000 UTC m=+978.147215094 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/72c3a2af-6e0e-4862-b638-2694a71f1e5a-webhook-certs") pod "openstack-operator-controller-manager-d664976d5-hdtkm" (UID: "72c3a2af-6e0e-4862-b638-2694a71f1e5a") : secret "webhook-server-cert" not found Nov 25 09:19:42 crc kubenswrapper[4687]: E1125 09:19:42.094081 4687 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 09:19:42 crc kubenswrapper[4687]: E1125 09:19:42.094131 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/72c3a2af-6e0e-4862-b638-2694a71f1e5a-metrics-certs podName:72c3a2af-6e0e-4862-b638-2694a71f1e5a nodeName:}" failed. No retries permitted until 2025-11-25 09:19:43.094118851 +0000 UTC m=+978.147758569 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/72c3a2af-6e0e-4862-b638-2694a71f1e5a-metrics-certs") pod "openstack-operator-controller-manager-d664976d5-hdtkm" (UID: "72c3a2af-6e0e-4862-b638-2694a71f1e5a") : secret "metrics-server-cert" not found Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.118340 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7r5w5"] Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.198905 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-w2vc9\" (UID: \"0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9" Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.215323 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-w2vc9\" (UID: \"0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9" Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.223578 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qfcjj"] Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.358751 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9" Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.492636 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-z59ft"] Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.500864 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-qzdnw"] Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.515971 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-qsrlz"] Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.526244 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-c8plr"] Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.536283 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-ll9p9"] Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.542838 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-pqd76"] Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.550099 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-99k8n"] Nov 25 09:19:42 crc kubenswrapper[4687]: W1125 09:19:42.582463 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod99367bef_5882_4884_8fe5_9a3ff8edd1cb.slice/crio-282c408a5345bdf9fe00fd180962e362bfaa0c53739b7e1a67ab5db66e80dca1 WatchSource:0}: Error finding container 282c408a5345bdf9fe00fd180962e362bfaa0c53739b7e1a67ab5db66e80dca1: Status 404 returned error can't 
find the container with id 282c408a5345bdf9fe00fd180962e362bfaa0c53739b7e1a67ab5db66e80dca1 Nov 25 09:19:42 crc kubenswrapper[4687]: W1125 09:19:42.599011 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc2ea5569_33b4_403d_9303_770ec432f4cc.slice/crio-a95c58d6e46ad7437b08e056e733fd3476dd8543cf7383f54e58a17541ae26a5 WatchSource:0}: Error finding container a95c58d6e46ad7437b08e056e733fd3476dd8543cf7383f54e58a17541ae26a5: Status 404 returned error can't find the container with id a95c58d6e46ad7437b08e056e733fd3476dd8543cf7383f54e58a17541ae26a5 Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.610060 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5\" (UID: \"9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5" Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.616164 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5\" (UID: \"9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5" Nov 25 09:19:42 crc kubenswrapper[4687]: W1125 09:19:42.617229 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podac5487a2_ce65_4034_973b_b939494aef63.slice/crio-59e840b3da527f58fb67f26a24fa508b3fe26e56dfbd6f2893542cd7a97716c4 WatchSource:0}: Error finding container 59e840b3da527f58fb67f26a24fa508b3fe26e56dfbd6f2893542cd7a97716c4: Status 404 returned error can't find the container with id 59e840b3da527f58fb67f26a24fa508b3fe26e56dfbd6f2893542cd7a97716c4 Nov 25 09:19:42 crc kubenswrapper[4687]: E1125 09:19:42.626781 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7t7xw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-6fdc4fcf86-pqd76_openstack-operators(ac5487a2-ce65-4034-973b-b939494aef63): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 09:19:42 crc kubenswrapper[4687]: E1125 09:19:42.626890 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5hfz9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-66cf5c67ff-c8plr_openstack-operators(be89cce1-89d8-47da-b777-f7805762b230): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 09:19:42 crc kubenswrapper[4687]: E1125 09:19:42.632878 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5hfz9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-66cf5c67ff-c8plr_openstack-operators(be89cce1-89d8-47da-b777-f7805762b230): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 09:19:42 crc kubenswrapper[4687]: E1125 09:19:42.633443 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7t7xw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-6fdc4fcf86-pqd76_openstack-operators(ac5487a2-ce65-4034-973b-b939494aef63): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 09:19:42 crc kubenswrapper[4687]: E1125 09:19:42.634548 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-c8plr" podUID="be89cce1-89d8-47da-b777-f7805762b230" Nov 25 09:19:42 crc kubenswrapper[4687]: E1125 09:19:42.634595 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-pqd76" podUID="ac5487a2-ce65-4034-973b-b939494aef63" Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.643317 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-7jmgj"] Nov 25 09:19:42 crc kubenswrapper[4687]: W1125 09:19:42.652371 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4a1d4849_2906_4fd5_b54e_7f2e567f05ef.slice/crio-9571632400c146a29b6605b5113abf72b375617b40a3e24d97c4ddd92877de3c WatchSource:0}: Error finding container 9571632400c146a29b6605b5113abf72b375617b40a3e24d97c4ddd92877de3c: Status 404 returned error can't find the container with id 9571632400c146a29b6605b5113abf72b375617b40a3e24d97c4ddd92877de3c Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.653038 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-567f98c9d-s9vtg"] Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.659475 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6lbv2"] Nov 25 09:19:42 crc kubenswrapper[4687]: E1125 09:19:42.661921 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:5324a6d2f76fc3041023b0cbd09a733ef2b59f310d390e4d6483d219eb96494f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sqzml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-567f98c9d-s9vtg_openstack-operators(4a1d4849-2906-4fd5-b54e-7f2e567f05ef): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 09:19:42 crc kubenswrapper[4687]: W1125 09:19:42.664841 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9fac1200_5b4d_4032_98aa_d293d13fdcc7.slice/crio-e3b8d2e35f69d50a7a1041d2dc6fd679fc2f3d1cd1cf8eb4db62580e61d02db0 WatchSource:0}: Error finding container e3b8d2e35f69d50a7a1041d2dc6fd679fc2f3d1cd1cf8eb4db62580e61d02db0: Status 404 returned error can't find the container with id e3b8d2e35f69d50a7a1041d2dc6fd679fc2f3d1cd1cf8eb4db62580e61d02db0 Nov 25 09:19:42 crc kubenswrapper[4687]: E1125 09:19:42.688064 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} 
BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kcrsc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cb74df96-7jmgj_openstack-operators(9fac1200-5b4d-4032-98aa-d293d13fdcc7): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 09:19:42 crc kubenswrapper[4687]: E1125 09:19:42.705413 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kcrsc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cb74df96-7jmgj_openstack-operators(9fac1200-5b4d-4032-98aa-d293d13fdcc7): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 09:19:42 crc kubenswrapper[4687]: E1125 09:19:42.706962 4687 pod_workers.go:1301] "Error syncing pod, 
skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5cb74df96-7jmgj" podUID="9fac1200-5b4d-4032-98aa-d293d13fdcc7" Nov 25 09:19:42 crc kubenswrapper[4687]: E1125 09:19:42.707535 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pf5rj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-6lbv2_openstack-operators(5a4bd509-a298-4fff-845b-262a41634134): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 09:19:42 crc kubenswrapper[4687]: E1125 09:19:42.708680 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6lbv2" podUID="5a4bd509-a298-4fff-845b-262a41634134" Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.768233 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-khvhw" event={"ID":"dc4b5a7c-5e58-42a7-b1ee-676268f99e21","Type":"ContainerStarted","Data":"29a214bd838d1add163405b3cf76e5dee4d17ea3969e94367dcc6926606f0c6b"} Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.770455 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-99k8n" event={"ID":"62a14c39-245f-4c8d-84b5-b23d023d810f","Type":"ContainerStarted","Data":"e029489e3f826972ef60d8efa2395042721b13904204a07cb8aae69ab76ca52d"} Nov 25 09:19:42 crc 
kubenswrapper[4687]: I1125 09:19:42.772493 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-7jmgj" event={"ID":"9fac1200-5b4d-4032-98aa-d293d13fdcc7","Type":"ContainerStarted","Data":"e3b8d2e35f69d50a7a1041d2dc6fd679fc2f3d1cd1cf8eb4db62580e61d02db0"}
Nov 25 09:19:42 crc kubenswrapper[4687]: E1125 09:19:42.774777 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cb74df96-7jmgj" podUID="9fac1200-5b4d-4032-98aa-d293d13fdcc7"
Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.775331 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-pqd76" event={"ID":"ac5487a2-ce65-4034-973b-b939494aef63","Type":"ContainerStarted","Data":"59e840b3da527f58fb67f26a24fa508b3fe26e56dfbd6f2893542cd7a97716c4"}
Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.776657 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-s9vtg" event={"ID":"4a1d4849-2906-4fd5-b54e-7f2e567f05ef","Type":"ContainerStarted","Data":"9571632400c146a29b6605b5113abf72b375617b40a3e24d97c4ddd92877de3c"}
Nov 25 09:19:42 crc kubenswrapper[4687]: E1125 09:19:42.777663 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-pqd76" podUID="ac5487a2-ce65-4034-973b-b939494aef63"
Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.778174 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-qzdnw" event={"ID":"555e5cf5-f2f8-46f2-ab17-8589c7391fc8","Type":"ContainerStarted","Data":"4189cb4e6269d65f8a619c007139bff926d9e33fd4988849d54a19261d39ae75"}
Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.780929 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-ll9p9" event={"ID":"c2ea5569-33b4-403d-9303-770ec432f4cc","Type":"ContainerStarted","Data":"a95c58d6e46ad7437b08e056e733fd3476dd8543cf7383f54e58a17541ae26a5"}
Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.793453 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-c8plr" event={"ID":"be89cce1-89d8-47da-b777-f7805762b230","Type":"ContainerStarted","Data":"300ef83af8b41116139cc78bec58e6543844b41a17a5e4a89b040a4e2061dc9d"}
Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.803397 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-qsrlz" event={"ID":"99367bef-5882-4884-8fe5-9a3ff8edd1cb","Type":"ContainerStarted","Data":"282c408a5345bdf9fe00fd180962e362bfaa0c53739b7e1a67ab5db66e80dca1"}
Nov 25 09:19:42 crc kubenswrapper[4687]: E1125 09:19:42.803748 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-c8plr" podUID="be89cce1-89d8-47da-b777-f7805762b230"
Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.811311 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-wxt5h" event={"ID":"ed7a2e30-6110-4b1c-864f-4856c4c0ec8a","Type":"ContainerStarted","Data":"dd5b6096b248efeeac3f6060d5453e7930271ddc928056d6a846bee920faf2b6"}
Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.814437 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-z59ft" event={"ID":"78edbfb4-5838-4c2d-a4e3-e1512bb55654","Type":"ContainerStarted","Data":"004407a52427f48d987980d0aaf7a08446a18720dc51f1521301b7fb1fa2874a"}
Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.815834 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-jcrff" event={"ID":"4d75764d-49d9-4482-98a9-728dd977f2bd","Type":"ContainerStarted","Data":"888d9116bdb4ffefa8da0c91b5707a1c84b174e21971e5d779b3361cc431606a"}
Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.817786 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zwv7l" event={"ID":"9c7f6da9-8178-4c3f-b565-9f6eca26c6c7","Type":"ContainerStarted","Data":"64447e9d7264c7e2af6b2a338c02a9d7e86cb5182b794a16d7b619f8d887052b"}
Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.826583 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qfcjj" event={"ID":"b07b54a0-d4b4-49e3-bd03-810eeefa6fa7","Type":"ContainerStarted","Data":"21524f7753834e93b3e234a382ae1472df38c1d70e34fd9b89789d4e5f6be9fa"}
Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.829249 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7r5w5" event={"ID":"7c015be6-1e7f-404b-9ea0-31cbec410081","Type":"ContainerStarted","Data":"1126e0e809449e3effe5db5a7f4390605e13ef515cf49dc95e04ed01b778849a"}
Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.830409 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6lbv2" event={"ID":"5a4bd509-a298-4fff-845b-262a41634134","Type":"ContainerStarted","Data":"e8e7af070f857140c28f8410852d786250c2867803d30901530080d48902a8c1"}
Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.833265 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-lsr97" event={"ID":"956b1e07-e4b1-44cf-9990-ae928a3e11c7","Type":"ContainerStarted","Data":"509247d729b77b0db0efd16c561bea74f3781739a21afd60fddd4df682810091"}
Nov 25 09:19:42 crc kubenswrapper[4687]: E1125 09:19:42.833560 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6lbv2" podUID="5a4bd509-a298-4fff-845b-262a41634134"
Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.834562 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-cf25t" event={"ID":"469145fd-b998-4c0a-b356-508c4940f78b","Type":"ContainerStarted","Data":"9bd73e5fbc57a76db052a2234223f35544a76d46cabfd7e6cf6ea15f056e393f"}
Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.848805 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5"
Nov 25 09:19:42 crc kubenswrapper[4687]: I1125 09:19:42.976005 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9"]
Nov 25 09:19:42 crc kubenswrapper[4687]: W1125 09:19:42.987922 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0f8d48bb_bbe7_4dd5_9a0c_5d5f769ebc14.slice/crio-7d51d3c42bc36fcf2ef026be703b2b25867bb242d96397c886b41a828af59ee8 WatchSource:0}: Error finding container 7d51d3c42bc36fcf2ef026be703b2b25867bb242d96397c886b41a828af59ee8: Status 404 returned error can't find the container with id 7d51d3c42bc36fcf2ef026be703b2b25867bb242d96397c886b41a828af59ee8
Nov 25 09:19:43 crc kubenswrapper[4687]: I1125 09:19:43.088300 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5"]
Nov 25 09:19:43 crc kubenswrapper[4687]: W1125 09:19:43.095451 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9ceb9cae_7f5d_4bfd_892d_a3eca9d5f0be.slice/crio-818b3daa71c2ddfb427a3ec35f3ba3999de92f6f1fb410b201efae752424d423 WatchSource:0}: Error finding container 818b3daa71c2ddfb427a3ec35f3ba3999de92f6f1fb410b201efae752424d423: Status 404 returned error can't find the container with id 818b3daa71c2ddfb427a3ec35f3ba3999de92f6f1fb410b201efae752424d423
Nov 25 09:19:43 crc kubenswrapper[4687]: I1125 09:19:43.116093 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/72c3a2af-6e0e-4862-b638-2694a71f1e5a-webhook-certs\") pod \"openstack-operator-controller-manager-d664976d5-hdtkm\" (UID: \"72c3a2af-6e0e-4862-b638-2694a71f1e5a\") " pod="openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm"
Nov 25 09:19:43 crc kubenswrapper[4687]: E1125 09:19:43.116265 4687 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 25 09:19:43 crc kubenswrapper[4687]: E1125 09:19:43.116324 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/72c3a2af-6e0e-4862-b638-2694a71f1e5a-webhook-certs podName:72c3a2af-6e0e-4862-b638-2694a71f1e5a nodeName:}" failed. No retries permitted until 2025-11-25 09:19:45.11630613 +0000 UTC m=+980.169945848 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/72c3a2af-6e0e-4862-b638-2694a71f1e5a-webhook-certs") pod "openstack-operator-controller-manager-d664976d5-hdtkm" (UID: "72c3a2af-6e0e-4862-b638-2694a71f1e5a") : secret "webhook-server-cert" not found
Nov 25 09:19:43 crc kubenswrapper[4687]: I1125 09:19:43.116916 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/72c3a2af-6e0e-4862-b638-2694a71f1e5a-metrics-certs\") pod \"openstack-operator-controller-manager-d664976d5-hdtkm\" (UID: \"72c3a2af-6e0e-4862-b638-2694a71f1e5a\") " pod="openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm"
Nov 25 09:19:43 crc kubenswrapper[4687]: I1125 09:19:43.124851 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/72c3a2af-6e0e-4862-b638-2694a71f1e5a-metrics-certs\") pod \"openstack-operator-controller-manager-d664976d5-hdtkm\" (UID: \"72c3a2af-6e0e-4862-b638-2694a71f1e5a\") " pod="openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm"
Nov 25 09:19:43 crc kubenswrapper[4687]: I1125 09:19:43.841584 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5" event={"ID":"9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be","Type":"ContainerStarted","Data":"818b3daa71c2ddfb427a3ec35f3ba3999de92f6f1fb410b201efae752424d423"}
Nov 25 09:19:43 crc kubenswrapper[4687]: I1125 09:19:43.843133 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9" event={"ID":"0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14","Type":"ContainerStarted","Data":"7d51d3c42bc36fcf2ef026be703b2b25867bb242d96397c886b41a828af59ee8"}
Nov 25 09:19:43 crc kubenswrapper[4687]: E1125 09:19:43.845455 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6lbv2" podUID="5a4bd509-a298-4fff-845b-262a41634134"
Nov 25 09:19:43 crc kubenswrapper[4687]: E1125 09:19:43.846181 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-c8plr" podUID="be89cce1-89d8-47da-b777-f7805762b230"
Nov 25 09:19:43 crc kubenswrapper[4687]: E1125 09:19:43.846229 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cb74df96-7jmgj" podUID="9fac1200-5b4d-4032-98aa-d293d13fdcc7"
Nov 25 09:19:43 crc kubenswrapper[4687]: E1125 09:19:43.846254 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-pqd76" podUID="ac5487a2-ce65-4034-973b-b939494aef63"
Nov 25 09:19:45 crc kubenswrapper[4687]: I1125 09:19:45.148163 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/72c3a2af-6e0e-4862-b638-2694a71f1e5a-webhook-certs\") pod \"openstack-operator-controller-manager-d664976d5-hdtkm\" (UID: \"72c3a2af-6e0e-4862-b638-2694a71f1e5a\") " pod="openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm"
Nov 25 09:19:45 crc kubenswrapper[4687]: I1125 09:19:45.154030 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/72c3a2af-6e0e-4862-b638-2694a71f1e5a-webhook-certs\") pod \"openstack-operator-controller-manager-d664976d5-hdtkm\" (UID: \"72c3a2af-6e0e-4862-b638-2694a71f1e5a\") " pod="openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm"
Nov 25 09:19:45 crc kubenswrapper[4687]: I1125 09:19:45.186049 4687 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm" Nov 25 09:19:53 crc kubenswrapper[4687]: I1125 09:19:53.844751 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:19:53 crc kubenswrapper[4687]: I1125 09:19:53.845541 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:19:57 crc kubenswrapper[4687]: E1125 09:19:57.313850 4687 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/designate-operator@sha256:c6405d94e56b40ef669729216ab4b9c441f34bb280902efa2940038c076b560f" Nov 25 09:19:57 crc kubenswrapper[4687]: E1125 09:19:57.314313 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/designate-operator@sha256:c6405d94e56b40ef669729216ab4b9c441f34bb280902efa2940038c076b560f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jt95j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
designate-operator-controller-manager-7d695c9b56-jcrff_openstack-operators(4d75764d-49d9-4482-98a9-728dd977f2bd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:20:00 crc kubenswrapper[4687]: E1125 09:20:00.061199 4687 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:d38faa9070da05487afdaa9e261ad39274c2ed862daf42efa460a040431f1991" Nov 25 09:20:00 crc kubenswrapper[4687]: E1125 09:20:00.062084 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:d38faa9070da05487afdaa9e261ad39274c2ed862daf42efa460a040431f1991,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4sdmv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-68b95954c9-wxt5h_openstack-operators(ed7a2e30-6110-4b1c-864f-4856c4c0ec8a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:20:00 crc kubenswrapper[4687]: E1125 09:20:00.766905 4687 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:5edd825a235f5784d9a65892763c5388c39df1731d0fcbf4ee33408b8c83ac96" Nov 25 09:20:00 crc kubenswrapper[4687]: E1125 09:20:00.767189 4687 kuberuntime_manager.go:1274] 
"Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:5edd825a235f5784d9a65892763c5388c39df1731d0fcbf4ee33408b8c83ac96,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cxk4p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-774b86978c-zwv7l_openstack-operators(9c7f6da9-8178-4c3f-b565-9f6eca26c6c7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:20:01 crc kubenswrapper[4687]: E1125 09:20:01.368258 4687 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7" Nov 25 09:20:01 crc kubenswrapper[4687]: E1125 09:20:01.368427 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7q7fj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-79556f57fc-99k8n_openstack-operators(62a14c39-245f-4c8d-84b5-b23d023d810f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:20:01 crc kubenswrapper[4687]: E1125 09:20:01.991520 4687 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:207578cb433471cc1a79c21a808c8a15489d1d3c9fa77e29f3f697c33917fec6" Nov 25 09:20:01 crc kubenswrapper[4687]: E1125 09:20:01.991744 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:207578cb433471cc1a79c21a808c8a15489d1d3c9fa77e29f3f697c33917fec6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cf7pc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-7c57c8bbc4-qzdnw_openstack-operators(555e5cf5-f2f8-46f2-ab17-8589c7391fc8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:20:02 crc kubenswrapper[4687]: E1125 09:20:02.454121 4687 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13" Nov 25 09:20:02 crc kubenswrapper[4687]: E1125 09:20:02.454619 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-f2n6m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-fd75fd47d-z59ft_openstack-operators(78edbfb4-5838-4c2d-a4e3-e1512bb55654): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:20:04 crc kubenswrapper[4687]: E1125 09:20:04.126185 4687 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894" Nov 25 09:20:04 crc kubenswrapper[4687]: E1125 09:20:04.126428 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qrd4d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-d5cc86f4b-w2vc9_openstack-operators(0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:20:04 crc kubenswrapper[4687]: E1125 09:20:04.506128 4687 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd" Nov 25 09:20:04 crc kubenswrapper[4687]: E1125 09:20:04.506684 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOME
TER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Name:
RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_
API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_
OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6pfl7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5_openstack-operators(9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:20:07 crc kubenswrapper[4687]: E1125 09:20:07.158770 4687 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f" Nov 25 09:20:07 crc kubenswrapper[4687]: E1125 09:20:07.159236 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-q6fc8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-864885998-ll9p9_openstack-operators(c2ea5569-33b4-403d-9303-770ec432f4cc): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 09:20:07 crc kubenswrapper[4687]: I1125 09:20:07.568774 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm"]
Nov 25 09:20:08 crc kubenswrapper[4687]: I1125 09:20:08.019112 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm" event={"ID":"72c3a2af-6e0e-4862-b638-2694a71f1e5a","Type":"ContainerStarted","Data":"6bf867b070db8a4c7fd1a8b4876990757a2e617463273dacf1153d2b1abe925d"}
Nov 25 09:20:09 crc kubenswrapper[4687]: I1125 09:20:09.034741 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-lsr97" event={"ID":"956b1e07-e4b1-44cf-9990-ae928a3e11c7","Type":"ContainerStarted","Data":"83e20328eeb7040776bbb677ca967b0bedc4a45913e510183d5bee7d8b0707bd"}
Nov 25 09:20:09 crc kubenswrapper[4687]: I1125 09:20:09.044342 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7r5w5" event={"ID":"7c015be6-1e7f-404b-9ea0-31cbec410081","Type":"ContainerStarted","Data":"d5abc7615aa0d8a5cb6515f6a2132f82b2daa6a65946c01422f7002cb28c56a6"}
Nov 25 09:20:09 crc kubenswrapper[4687]: I1125 09:20:09.046288 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-qsrlz" event={"ID":"99367bef-5882-4884-8fe5-9a3ff8edd1cb","Type":"ContainerStarted","Data":"2c792d91a3bdd7584726677c60440356fdd27792a74cb83da69c765fb4c9f736"}
Nov 25 09:20:09 crc kubenswrapper[4687]: I1125 09:20:09.055776 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-cf25t" event={"ID":"469145fd-b998-4c0a-b356-508c4940f78b","Type":"ContainerStarted","Data":"cdc2d7f5aa4c9e28490b806311c06a60afae9f0e9e0fb62fb4f2da4ef9ffa63d"}
Nov 25 09:20:09 crc kubenswrapper[4687]: I1125 09:20:09.064769 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-ckt6f" event={"ID":"26dc2622-a74f-405c-9bbb-291adb145908","Type":"ContainerStarted","Data":"62e096dac717c3a0105fcdc1b4056d9794ecb1eeed817854b4760d541a2bbd5d"}
Nov 25 09:20:09 crc kubenswrapper[4687]: I1125 09:20:09.067084 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm" event={"ID":"72c3a2af-6e0e-4862-b638-2694a71f1e5a","Type":"ContainerStarted","Data":"22811288009d5a8f8888cc85597b0d4f8a0f5b73f6fe0e775d40ec1acde9c85d"}
Nov 25 09:20:09 crc kubenswrapper[4687]: I1125 09:20:09.067215 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm"
Nov 25 09:20:09 crc kubenswrapper[4687]: I1125 09:20:09.069131 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-khvhw" event={"ID":"dc4b5a7c-5e58-42a7-b1ee-676268f99e21","Type":"ContainerStarted","Data":"29a052b107c7960d3dfc60f345f996c84fccd1a95315a6cb8ebc2d64636ea4db"}
Nov 25 09:20:09 crc kubenswrapper[4687]: I1125 09:20:09.071170 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qfcjj" event={"ID":"b07b54a0-d4b4-49e3-bd03-810eeefa6fa7","Type":"ContainerStarted","Data":"41cdaabcfcc204e2d931060c61c777dd791fd0b1250e578988e8c3975d5fd46b"}
Nov 25 09:20:09 crc kubenswrapper[4687]: I1125 09:20:09.074080 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-4dn9x" event={"ID":"a5f5d45b-b0ce-48f8-892e-02571e1f9f24","Type":"ContainerStarted","Data":"6b0c0696dafd66456e9fea7e40cddd74c61aa01c02c28163b9a5171c17a9da89"}
Nov 25 09:20:09 crc kubenswrapper[4687]: I1125 09:20:09.099830 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm" podStartSLOduration=28.099809938 podStartE2EDuration="28.099809938s" podCreationTimestamp="2025-11-25 09:19:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:20:09.092142378 +0000 UTC m=+1004.145782096" watchObservedRunningTime="2025-11-25 09:20:09.099809938 +0000 UTC m=+1004.153449656"
Nov 25 09:20:10 crc kubenswrapper[4687]: E1125 09:20:10.073592 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-qzdnw" podUID="555e5cf5-f2f8-46f2-ab17-8589c7391fc8"
Nov 25 09:20:10 crc kubenswrapper[4687]: I1125 09:20:10.089443 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-qzdnw" event={"ID":"555e5cf5-f2f8-46f2-ab17-8589c7391fc8","Type":"ContainerStarted","Data":"671f4522270756f589ee2d20c332baf7b408392f54e926f23812de4f13fe81be"}
Nov 25 09:20:10 crc kubenswrapper[4687]: I1125 09:20:10.091343 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6lbv2" event={"ID":"5a4bd509-a298-4fff-845b-262a41634134","Type":"ContainerStarted","Data":"a9b13f1546864af8e606fd8923cc0a8e2765aa3f9d7a0e37db0c0b092d1c5d67"}
Nov 25 09:20:10 crc kubenswrapper[4687]: I1125 09:20:10.098925 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-pqd76" event={"ID":"ac5487a2-ce65-4034-973b-b939494aef63","Type":"ContainerStarted","Data":"133ccd64a2b9705ee4dcfac6271e8c8ef43a49ead3d068c7d6ce5e8150295e2f"}
Nov 25 09:20:10 crc kubenswrapper[4687]: I1125 09:20:10.101633 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-c8plr" event={"ID":"be89cce1-89d8-47da-b777-f7805762b230","Type":"ContainerStarted","Data":"4b08a26095daf36f481ba759b7795a76705bb12d2afc3338a9f3e9d4d0eec0aa"}
Nov 25 09:20:10 crc kubenswrapper[4687]: E1125 09:20:10.105154 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:207578cb433471cc1a79c21a808c8a15489d1d3c9fa77e29f3f697c33917fec6\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-qzdnw" podUID="555e5cf5-f2f8-46f2-ab17-8589c7391fc8"
Nov 25 09:20:10 crc kubenswrapper[4687]: I1125 09:20:10.139354 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6lbv2" podStartSLOduration=3.43461596 podStartE2EDuration="29.139338362s" podCreationTimestamp="2025-11-25 09:19:41 +0000 UTC" firstStartedPulling="2025-11-25 09:19:42.707335094 +0000 UTC m=+977.760974812" lastFinishedPulling="2025-11-25 09:20:08.412057496 +0000 UTC m=+1003.465697214" observedRunningTime="2025-11-25 09:20:10.131340263 +0000 UTC m=+1005.184979981" watchObservedRunningTime="2025-11-25 09:20:10.139338362 +0000 UTC m=+1005.192978080"
Nov 25 09:20:10 crc kubenswrapper[4687]: E1125 09:20:10.373300 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-ll9p9" podUID="c2ea5569-33b4-403d-9303-770ec432f4cc"
Nov 25 09:20:10 crc kubenswrapper[4687]: E1125 09:20:10.433149 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-s9vtg" podUID="4a1d4849-2906-4fd5-b54e-7f2e567f05ef"
Nov 25 09:20:10 crc kubenswrapper[4687]: E1125 09:20:10.611712 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-99k8n" podUID="62a14c39-245f-4c8d-84b5-b23d023d810f"
Nov 25 09:20:10 crc kubenswrapper[4687]: E1125 09:20:10.675188 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-jcrff" podUID="4d75764d-49d9-4482-98a9-728dd977f2bd"
Nov 25 09:20:10 crc kubenswrapper[4687]: E1125 09:20:10.891933 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-z59ft" podUID="78edbfb4-5838-4c2d-a4e3-e1512bb55654"
Nov 25 09:20:10 crc kubenswrapper[4687]: E1125 09:20:10.906023 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-wxt5h" podUID="ed7a2e30-6110-4b1c-864f-4856c4c0ec8a"
Nov 25 09:20:11 crc kubenswrapper[4687]: E1125 09:20:11.052197 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5" podUID="9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be"
Nov 25 09:20:11 crc kubenswrapper[4687]: E1125 09:20:11.102283 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zwv7l" podUID="9c7f6da9-8178-4c3f-b565-9f6eca26c6c7"
Nov 25 09:20:11 crc kubenswrapper[4687]: E1125 09:20:11.105896 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9" podUID="0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14"
Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.125901 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9" event={"ID":"0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14","Type":"ContainerStarted","Data":"b282e2b5aab52237225319715626947261158d8a569ceb8bf842ad81be23fd5c"}
Nov 25 09:20:11 crc kubenswrapper[4687]: E1125 09:20:11.128217 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894\\\"\"" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9" podUID="0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14"
Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.129690 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-lsr97"
Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.139040 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7r5w5" event={"ID":"7c015be6-1e7f-404b-9ea0-31cbec410081","Type":"ContainerStarted","Data":"63882a1a28e5daf849e59c73074bd82f74ecfd8e9494896df90761ef6cbda8c2"}
Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.139173 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7r5w5"
Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.151085 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-cf25t" event={"ID":"469145fd-b998-4c0a-b356-508c4940f78b","Type":"ContainerStarted","Data":"2172901a0752c26c190b68c46c7f4a8409f935a905e0f30d9fe95c4b7c3e04eb"}
Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.162157 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-cf25t"
Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.185378 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-lsr97" podStartSLOduration=2.56421457 podStartE2EDuration="31.18523855s" podCreationTimestamp="2025-11-25 09:19:40 +0000 UTC" firstStartedPulling="2025-11-25 09:19:41.916412921 +0000 UTC m=+976.970052639" lastFinishedPulling="2025-11-25 09:20:10.537436901 +0000 UTC m=+1005.591076619" observedRunningTime="2025-11-25 09:20:11.179957435 +0000 UTC m=+1006.233597153" watchObservedRunningTime="2025-11-25 09:20:11.18523855 +0000 UTC m=+1006.238878268"
Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.191015 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-wxt5h" event={"ID":"ed7a2e30-6110-4b1c-864f-4856c4c0ec8a","Type":"ContainerStarted","Data":"d768e69a1eecc360abff2fdd41e589b8ebb9f42cc543e12e17c798bc6cc1224b"}
Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.193100 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-s9vtg" event={"ID":"4a1d4849-2906-4fd5-b54e-7f2e567f05ef","Type":"ContainerStarted","Data":"29f6c0a5a9bcb0e54e85521717de1892deecf82e3bb4d1e3abf6de21d46c643c"}
Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.201020 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-c8plr" event={"ID":"be89cce1-89d8-47da-b777-f7805762b230","Type":"ContainerStarted","Data":"f584eb8f318bb604d01f30a1d70b287bc0b56231346b36f1c3a0b81315777ceb"}
Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.201671 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-c8plr"
Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.205198 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7r5w5" podStartSLOduration=3.423715439 podStartE2EDuration="31.205181785s" podCreationTimestamp="2025-11-25 09:19:40 +0000 UTC" firstStartedPulling="2025-11-25 09:19:42.21508294 +0000 UTC m=+977.268722658" lastFinishedPulling="2025-11-25 09:20:09.996549286 +0000 UTC m=+1005.050189004" observedRunningTime="2025-11-25 09:20:11.197389902 +0000 UTC m=+1006.251029620" watchObservedRunningTime="2025-11-25 09:20:11.205181785 +0000 UTC m=+1006.258821503"
Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.221757 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-pqd76" event={"ID":"ac5487a2-ce65-4034-973b-b939494aef63","Type":"ContainerStarted","Data":"7888f2cfd71204356502994d7eefd637b6b3ae0edd31f86de124a214d6aae10a"}
Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.222421 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-pqd76"
Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.228922 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-khvhw" event={"ID":"dc4b5a7c-5e58-42a7-b1ee-676268f99e21","Type":"ContainerStarted","Data":"34fff8999f50188441b01e2eb48c11a1c391612650d1a4fb3dbae9e91273f468"}
Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.229023 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status=""
pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-khvhw" Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.241687 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qfcjj" event={"ID":"b07b54a0-d4b4-49e3-bd03-810eeefa6fa7","Type":"ContainerStarted","Data":"d770f25729bb2692ff115359fa3176cb7b5b9f53f72216e2db7e63b0bc41321c"} Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.242146 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qfcjj" Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.255357 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-jcrff" event={"ID":"4d75764d-49d9-4482-98a9-728dd977f2bd","Type":"ContainerStarted","Data":"b159000c806ebae093bda22bdff823494ff041392b85e3a29d2d2f0a5c765b86"} Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.257158 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-cf25t" podStartSLOduration=3.019279537 podStartE2EDuration="31.257142426s" podCreationTimestamp="2025-11-25 09:19:40 +0000 UTC" firstStartedPulling="2025-11-25 09:19:41.912682209 +0000 UTC m=+976.966321927" lastFinishedPulling="2025-11-25 09:20:10.150545098 +0000 UTC m=+1005.204184816" observedRunningTime="2025-11-25 09:20:11.253812936 +0000 UTC m=+1006.307452654" watchObservedRunningTime="2025-11-25 09:20:11.257142426 +0000 UTC m=+1006.310782144" Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.272736 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-99k8n" event={"ID":"62a14c39-245f-4c8d-84b5-b23d023d810f","Type":"ContainerStarted","Data":"ccad871ab25d8abf8890618f060a6536391f6432e0c63a158bcd5acef3b5b5ea"} Nov 25 09:20:11 crc kubenswrapper[4687]: E1125 09:20:11.280617 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7\\\"\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-99k8n" podUID="62a14c39-245f-4c8d-84b5-b23d023d810f" Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.282706 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zwv7l" event={"ID":"9c7f6da9-8178-4c3f-b565-9f6eca26c6c7","Type":"ContainerStarted","Data":"9509a7f45e1d8675e2d689a07dfbd91f2eadf0c13b260441178d8e6517d2da20"} Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.287981 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5" event={"ID":"9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be","Type":"ContainerStarted","Data":"d0a7468cff2dfc22b104b2c545025645bf99ca5be9bfbefdc3ae0b0c13f2f649"} Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.294888 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-ckt6f" event={"ID":"26dc2622-a74f-405c-9bbb-291adb145908","Type":"ContainerStarted","Data":"6252eaa1242a5f5aab5d19e1772168518e692fc78ab7a0548714664965caaff3"} Nov 25 09:20:11 crc 
kubenswrapper[4687]: I1125 09:20:11.295024 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-ckt6f" Nov 25 09:20:11 crc kubenswrapper[4687]: E1125 09:20:11.298757 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5" podUID="9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be" Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.300904 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-4dn9x" event={"ID":"a5f5d45b-b0ce-48f8-892e-02571e1f9f24","Type":"ContainerStarted","Data":"91cb9d524275141113926e6b1dec33a1b69394dcf8abcff7662cfb157717bd55"} Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.301487 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-4dn9x" Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.307594 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-7jmgj" event={"ID":"9fac1200-5b4d-4032-98aa-d293d13fdcc7","Type":"ContainerStarted","Data":"b49f6d3e45dae378714e6ec2251f864a7609c59b75e01bc1d40a0ebee08c122c"} Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.307644 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-7jmgj" event={"ID":"9fac1200-5b4d-4032-98aa-d293d13fdcc7","Type":"ContainerStarted","Data":"458cbb4a3e3b99525606c425a16b64936e04fe05c7b2bfa34c07c6a7f8474655"} Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.307817 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cb74df96-7jmgj" Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.314799 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qfcjj" podStartSLOduration=3.030312338 podStartE2EDuration="31.314778943s" podCreationTimestamp="2025-11-25 09:19:40 +0000 UTC" firstStartedPulling="2025-11-25 09:19:42.222673947 +0000 UTC m=+977.276313665" lastFinishedPulling="2025-11-25 09:20:10.507140552 +0000 UTC m=+1005.560780270" observedRunningTime="2025-11-25 09:20:11.307653538 +0000 UTC m=+1006.361293246" watchObservedRunningTime="2025-11-25 09:20:11.314778943 +0000 UTC m=+1006.368418661" Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.327025 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-ll9p9" event={"ID":"c2ea5569-33b4-403d-9303-770ec432f4cc","Type":"ContainerStarted","Data":"52a589d0d2fa9808f2ac370b5419f3a0e6b2ae7db5114dc8e0466b02a4ed1947"} Nov 25 09:20:11 crc kubenswrapper[4687]: E1125 09:20:11.328680 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\"" 
pod="openstack-operators/watcher-operator-controller-manager-864885998-ll9p9" podUID="c2ea5569-33b4-403d-9303-770ec432f4cc" Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.359113 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-qsrlz" event={"ID":"99367bef-5882-4884-8fe5-9a3ff8edd1cb","Type":"ContainerStarted","Data":"f3c9deaa3f1d5f3186af1e7422fe5b49bf7c57b0c4880dc0ab27b0ba47f27414"} Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.359254 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-qsrlz" Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.373592 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-z59ft" event={"ID":"78edbfb4-5838-4c2d-a4e3-e1512bb55654","Type":"ContainerStarted","Data":"a2a3ed5a49c8afdcb979dfe650c936de14eb1d4d12095f25d3cc769465c731c8"} Nov 25 09:20:11 crc kubenswrapper[4687]: E1125 09:20:11.373970 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:207578cb433471cc1a79c21a808c8a15489d1d3c9fa77e29f3f697c33917fec6\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-qzdnw" podUID="555e5cf5-f2f8-46f2-ab17-8589c7391fc8" Nov 25 09:20:11 crc kubenswrapper[4687]: E1125 09:20:11.376816 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-z59ft" podUID="78edbfb4-5838-4c2d-a4e3-e1512bb55654" Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.377664 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-khvhw" podStartSLOduration=2.98245278 podStartE2EDuration="31.377652453s" podCreationTimestamp="2025-11-25 09:19:40 +0000 UTC" firstStartedPulling="2025-11-25 09:19:42.189445259 +0000 UTC m=+977.243085017" lastFinishedPulling="2025-11-25 09:20:10.584644972 +0000 UTC m=+1005.638284690" observedRunningTime="2025-11-25 09:20:11.355079975 +0000 UTC m=+1006.408719693" watchObservedRunningTime="2025-11-25 09:20:11.377652453 +0000 UTC m=+1006.431292171" Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.392877 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-c8plr" podStartSLOduration=6.186824558 podStartE2EDuration="31.392859819s" podCreationTimestamp="2025-11-25 09:19:40 +0000 UTC" firstStartedPulling="2025-11-25 09:19:42.626835872 +0000 UTC m=+977.680475590" lastFinishedPulling="2025-11-25 09:20:07.832871113 +0000 UTC m=+1002.886510851" observedRunningTime="2025-11-25 09:20:11.390636868 +0000 UTC m=+1006.444276586" watchObservedRunningTime="2025-11-25 09:20:11.392859819 +0000 UTC m=+1006.446499537" Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.405815 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-pqd76" podStartSLOduration=5.897036461 
podStartE2EDuration="31.405798752s" podCreationTimestamp="2025-11-25 09:19:40 +0000 UTC" firstStartedPulling="2025-11-25 09:19:42.626664498 +0000 UTC m=+977.680304216" lastFinishedPulling="2025-11-25 09:20:08.135426789 +0000 UTC m=+1003.189066507" observedRunningTime="2025-11-25 09:20:11.404912029 +0000 UTC m=+1006.458551747" watchObservedRunningTime="2025-11-25 09:20:11.405798752 +0000 UTC m=+1006.459438470" Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.499352 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-ckt6f" podStartSLOduration=2.906832942 podStartE2EDuration="31.499333231s" podCreationTimestamp="2025-11-25 09:19:40 +0000 UTC" firstStartedPulling="2025-11-25 09:19:41.469527467 +0000 UTC m=+976.523167185" lastFinishedPulling="2025-11-25 09:20:10.062027756 +0000 UTC m=+1005.115667474" observedRunningTime="2025-11-25 09:20:11.494299153 +0000 UTC m=+1006.547938871" watchObservedRunningTime="2025-11-25 09:20:11.499333231 +0000 UTC m=+1006.552972949" Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.552223 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-4dn9x" podStartSLOduration=2.996627187 podStartE2EDuration="31.552205227s" podCreationTimestamp="2025-11-25 09:19:40 +0000 UTC" firstStartedPulling="2025-11-25 09:19:41.462798703 +0000 UTC m=+976.516438421" lastFinishedPulling="2025-11-25 09:20:10.018376743 +0000 UTC m=+1005.072016461" observedRunningTime="2025-11-25 09:20:11.548961138 +0000 UTC m=+1006.602600916" watchObservedRunningTime="2025-11-25 09:20:11.552205227 +0000 UTC m=+1006.605844945" Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.574225 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-qsrlz" podStartSLOduration=4.248972562 podStartE2EDuration="31.574210509s" podCreationTimestamp="2025-11-25 09:19:40 +0000 UTC" firstStartedPulling="2025-11-25 09:19:42.593300815 +0000 UTC m=+977.646940533" lastFinishedPulling="2025-11-25 09:20:09.918538762 +0000 UTC m=+1004.972178480" observedRunningTime="2025-11-25 09:20:11.570867918 +0000 UTC m=+1006.624507636" watchObservedRunningTime="2025-11-25 09:20:11.574210509 +0000 UTC m=+1006.627850227" Nov 25 09:20:11 crc kubenswrapper[4687]: I1125 09:20:11.587442 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cb74df96-7jmgj" podStartSLOduration=6.443452398 podStartE2EDuration="31.587423091s" podCreationTimestamp="2025-11-25 09:19:40 +0000 UTC" firstStartedPulling="2025-11-25 09:19:42.687955735 +0000 UTC m=+977.741595453" lastFinishedPulling="2025-11-25 09:20:07.831926428 +0000 UTC m=+1002.885566146" observedRunningTime="2025-11-25 09:20:11.585772746 +0000 UTC m=+1006.639412474" watchObservedRunningTime="2025-11-25 09:20:11.587423091 +0000 UTC m=+1006.641062799" Nov 25 09:20:12 crc kubenswrapper[4687]: I1125 09:20:12.384104 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zwv7l" event={"ID":"9c7f6da9-8178-4c3f-b565-9f6eca26c6c7","Type":"ContainerStarted","Data":"bb09256b174f7ae2643a8714edc5dde7a450d632e39d5ea70def148594d481cf"} Nov 25 09:20:12 crc kubenswrapper[4687]: I1125 09:20:12.385413 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/heat-operator-controller-manager-774b86978c-zwv7l" Nov 25 09:20:12 crc kubenswrapper[4687]: I1125 09:20:12.387132 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-lsr97" event={"ID":"956b1e07-e4b1-44cf-9990-ae928a3e11c7","Type":"ContainerStarted","Data":"126dbd57094ede7f9ad7c54eb35fb9d83c28d9020ef1b12c1a63885c4ba40e86"} Nov 25 09:20:12 crc kubenswrapper[4687]: I1125 09:20:12.390371 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-wxt5h" event={"ID":"ed7a2e30-6110-4b1c-864f-4856c4c0ec8a","Type":"ContainerStarted","Data":"024d7a32393e292cc17072317a2c8fc3409cb72c1e2235af785c435301ded719"} Nov 25 09:20:12 crc kubenswrapper[4687]: I1125 09:20:12.390659 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-wxt5h" Nov 25 09:20:12 crc kubenswrapper[4687]: I1125 09:20:12.393544 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-jcrff" event={"ID":"4d75764d-49d9-4482-98a9-728dd977f2bd","Type":"ContainerStarted","Data":"0989cf494a163acad5237a017247cde5f037276453a7eb51f7748400bf1a9532"} Nov 25 09:20:12 crc kubenswrapper[4687]: I1125 09:20:12.393581 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-jcrff" Nov 25 09:20:12 crc kubenswrapper[4687]: E1125 09:20:12.397010 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-ll9p9" podUID="c2ea5569-33b4-403d-9303-770ec432f4cc" Nov 25 09:20:12 crc kubenswrapper[4687]: E1125 09:20:12.400179 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5" podUID="9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be" Nov 25 09:20:12 crc kubenswrapper[4687]: E1125 09:20:12.400251 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894\\\"\"" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9" podUID="0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14" Nov 25 09:20:12 crc kubenswrapper[4687]: E1125 09:20:12.403727 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-z59ft" podUID="78edbfb4-5838-4c2d-a4e3-e1512bb55654" Nov 25 09:20:12 crc kubenswrapper[4687]: I1125 09:20:12.415436 4687 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zwv7l" podStartSLOduration=2.686606017 podStartE2EDuration="32.415417758s" podCreationTimestamp="2025-11-25 09:19:40 +0000 UTC" firstStartedPulling="2025-11-25 09:19:41.997079687 +0000 UTC m=+977.050719405" lastFinishedPulling="2025-11-25 09:20:11.725891428 +0000 UTC m=+1006.779531146" observedRunningTime="2025-11-25 09:20:12.404982523 +0000 UTC m=+1007.458622241" watchObservedRunningTime="2025-11-25 09:20:12.415417758 +0000 UTC m=+1007.469057476" Nov 25 09:20:12 crc kubenswrapper[4687]: I1125 09:20:12.433096 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-jcrff" podStartSLOduration=2.453636036 podStartE2EDuration="32.433076451s" podCreationTimestamp="2025-11-25 09:19:40 +0000 UTC" firstStartedPulling="2025-11-25 09:19:41.747131431 +0000 UTC m=+976.800771149" lastFinishedPulling="2025-11-25 09:20:11.726571846 +0000 UTC m=+1006.780211564" observedRunningTime="2025-11-25 09:20:12.424159857 +0000 UTC m=+1007.477799585" watchObservedRunningTime="2025-11-25 09:20:12.433076451 +0000 UTC m=+1007.486716169" Nov 25 09:20:12 crc kubenswrapper[4687]: I1125 09:20:12.506495 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-wxt5h" podStartSLOduration=2.875619687 podStartE2EDuration="32.506478509s" podCreationTimestamp="2025-11-25 09:19:40 +0000 UTC" firstStartedPulling="2025-11-25 09:19:41.990297311 +0000 UTC m=+977.043937029" lastFinishedPulling="2025-11-25 09:20:11.621156123 +0000 UTC m=+1006.674795851" observedRunningTime="2025-11-25 09:20:12.501200585 +0000 UTC m=+1007.554840313" watchObservedRunningTime="2025-11-25 09:20:12.506478509 +0000 UTC m=+1007.560118227" Nov 25 09:20:13 crc kubenswrapper[4687]: I1125 09:20:13.401667 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-99k8n" event={"ID":"62a14c39-245f-4c8d-84b5-b23d023d810f","Type":"ContainerStarted","Data":"c10561b228045bf67f2c4b7edd3d09563b15b69f897faf810f1b8e755d1d27fc"} Nov 25 09:20:13 crc kubenswrapper[4687]: I1125 09:20:13.402121 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-99k8n" Nov 25 09:20:13 crc kubenswrapper[4687]: I1125 09:20:13.404356 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-s9vtg" event={"ID":"4a1d4849-2906-4fd5-b54e-7f2e567f05ef","Type":"ContainerStarted","Data":"92b79ff88099250006a13542f4bd048065b73189089b41b34a715598fdf082ec"} Nov 25 09:20:13 crc kubenswrapper[4687]: I1125 09:20:13.404408 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-s9vtg" Nov 25 09:20:13 crc kubenswrapper[4687]: I1125 09:20:13.408578 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qfcjj" Nov 25 09:20:13 crc kubenswrapper[4687]: I1125 09:20:13.409375 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-4dn9x" Nov 25 09:20:13 crc kubenswrapper[4687]: I1125 09:20:13.409441 4687 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-lsr97" Nov 25 09:20:13 crc kubenswrapper[4687]: I1125 09:20:13.409489 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-cf25t" Nov 25 09:20:13 crc kubenswrapper[4687]: I1125 09:20:13.420658 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-99k8n" podStartSLOduration=2.903687314 podStartE2EDuration="33.420640784s" podCreationTimestamp="2025-11-25 09:19:40 +0000 UTC" firstStartedPulling="2025-11-25 09:19:42.595406722 +0000 UTC m=+977.649046440" lastFinishedPulling="2025-11-25 09:20:13.112360192 +0000 UTC m=+1008.165999910" observedRunningTime="2025-11-25 09:20:13.419341218 +0000 UTC m=+1008.472980956" watchObservedRunningTime="2025-11-25 09:20:13.420640784 +0000 UTC m=+1008.474280502" Nov 25 09:20:13 crc kubenswrapper[4687]: I1125 09:20:13.445946 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-s9vtg" podStartSLOduration=2.997615364 podStartE2EDuration="33.445927975s" podCreationTimestamp="2025-11-25 09:19:40 +0000 UTC" firstStartedPulling="2025-11-25 09:19:42.661819539 +0000 UTC m=+977.715459257" lastFinishedPulling="2025-11-25 09:20:13.11013215 +0000 UTC m=+1008.163771868" observedRunningTime="2025-11-25 09:20:13.441428392 +0000 UTC m=+1008.495068130" watchObservedRunningTime="2025-11-25 09:20:13.445927975 +0000 UTC m=+1008.499567693" Nov 25 09:20:15 crc kubenswrapper[4687]: I1125 09:20:15.204243 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-d664976d5-hdtkm" Nov 25 09:20:20 crc kubenswrapper[4687]: I1125 09:20:20.523831 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-ckt6f" Nov 25 09:20:20 crc kubenswrapper[4687]: I1125 09:20:20.544472 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-jcrff" Nov 25 09:20:20 crc kubenswrapper[4687]: I1125 09:20:20.559160 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-wxt5h" Nov 25 09:20:20 crc kubenswrapper[4687]: I1125 09:20:20.605448 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zwv7l" Nov 25 09:20:20 crc kubenswrapper[4687]: I1125 09:20:20.895352 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7r5w5" Nov 25 09:20:20 crc kubenswrapper[4687]: I1125 09:20:20.937851 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-khvhw" Nov 25 09:20:21 crc kubenswrapper[4687]: I1125 09:20:21.166972 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-99k8n" Nov 25 09:20:21 crc kubenswrapper[4687]: I1125 09:20:21.245861 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-c8plr" Nov 25 09:20:21 crc kubenswrapper[4687]: I1125 09:20:21.275618 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-qsrlz" Nov 25 09:20:21 crc kubenswrapper[4687]: I1125 09:20:21.403131 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-pqd76" Nov 25 09:20:21 crc kubenswrapper[4687]: I1125 09:20:21.414254 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-s9vtg" Nov 25 09:20:21 crc kubenswrapper[4687]: I1125 09:20:21.612517 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cb74df96-7jmgj" Nov 25 09:20:23 crc kubenswrapper[4687]: I1125 09:20:23.845077 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:20:23 crc kubenswrapper[4687]: I1125 09:20:23.845152 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:20:33 crc kubenswrapper[4687]: I1125 09:20:33.567493 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-qzdnw" event={"ID":"555e5cf5-f2f8-46f2-ab17-8589c7391fc8","Type":"ContainerStarted","Data":"611ef6dabb39b120012caa00df0ef7685c4d19b1585fda0516b6fb4d7f87fd79"} Nov 25 09:20:33 crc kubenswrapper[4687]: I1125 09:20:33.568414 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-qzdnw" Nov 25 09:20:33 crc kubenswrapper[4687]: I1125 09:20:33.569568 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-ll9p9" event={"ID":"c2ea5569-33b4-403d-9303-770ec432f4cc","Type":"ContainerStarted","Data":"8c43e09846ea50c4727a59c150ab03a71d81f44a34f2cbedba8496da703eaf76"} Nov 25 09:20:33 crc kubenswrapper[4687]: I1125 09:20:33.569783 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-864885998-ll9p9" Nov 25 09:20:33 crc kubenswrapper[4687]: I1125 09:20:33.571378 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5" event={"ID":"9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be","Type":"ContainerStarted","Data":"5cca1fcb2bf61f8bc1dcb42481145a5c141f93d8b88c0354b2c9a22056e28266"} Nov 25 09:20:33 crc kubenswrapper[4687]: I1125 09:20:33.571547 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5" Nov 25 09:20:33 crc kubenswrapper[4687]: I1125 09:20:33.573273 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-z59ft" event={"ID":"78edbfb4-5838-4c2d-a4e3-e1512bb55654","Type":"ContainerStarted","Data":"45a901ce0ae3a0305f037d91e4516fc262d3c6ffd5eabdffac550fec5c0e913d"} Nov 25 09:20:33 crc kubenswrapper[4687]: I1125 09:20:33.573551 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-z59ft" Nov 25 09:20:33 crc kubenswrapper[4687]: I1125 09:20:33.575460 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9" event={"ID":"0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14","Type":"ContainerStarted","Data":"5b4d3799f19027b6fef1646b93752cd9f163b94e5670d76a478d0b9f4937e7ef"} Nov 25 09:20:33 crc kubenswrapper[4687]: I1125 09:20:33.575710 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9" Nov 25 09:20:33 crc kubenswrapper[4687]: I1125 09:20:33.594292 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-qzdnw" podStartSLOduration=3.766577416 podStartE2EDuration="53.594274468s" podCreationTimestamp="2025-11-25 09:19:40 +0000 UTC" firstStartedPulling="2025-11-25 09:19:42.528013969 +0000 UTC m=+977.581653687" lastFinishedPulling="2025-11-25 09:20:32.355711011 +0000 UTC m=+1027.409350739" observedRunningTime="2025-11-25 09:20:33.593623671 +0000 UTC m=+1028.647263399" watchObservedRunningTime="2025-11-25 09:20:33.594274468 +0000 UTC m=+1028.647914196" Nov 25 09:20:33 crc kubenswrapper[4687]: I1125 09:20:33.616719 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-z59ft" podStartSLOduration=3.914841541 podStartE2EDuration="53.616696111s" podCreationTimestamp="2025-11-25 09:19:40 +0000 UTC" firstStartedPulling="2025-11-25 09:19:42.528134672 +0000 UTC m=+977.581774390" lastFinishedPulling="2025-11-25 09:20:32.229989222 +0000 UTC m=+1027.283628960" observedRunningTime="2025-11-25 09:20:33.614967764 +0000 UTC m=+1028.668607492" watchObservedRunningTime="2025-11-25 09:20:33.616696111 +0000 UTC m=+1028.670335849" Nov 25 09:20:33 crc kubenswrapper[4687]: I1125 09:20:33.635805 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9" podStartSLOduration=4.402033929 podStartE2EDuration="53.635773424s" podCreationTimestamp="2025-11-25 09:19:40 +0000 UTC" firstStartedPulling="2025-11-25 09:19:42.996826492 +0000 UTC m=+978.050466210" lastFinishedPulling="2025-11-25 09:20:32.230565937 +0000 UTC m=+1027.284205705" observedRunningTime="2025-11-25 09:20:33.633760759 +0000 UTC m=+1028.687400547" watchObservedRunningTime="2025-11-25 09:20:33.635773424 +0000 UTC m=+1028.689413152" Nov 25 09:20:33 crc kubenswrapper[4687]: I1125 09:20:33.658594 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-864885998-ll9p9" podStartSLOduration=4.004758493 podStartE2EDuration="53.658562767s" podCreationTimestamp="2025-11-25 09:19:40 +0000 UTC" firstStartedPulling="2025-11-25 09:19:42.617996101 +0000 UTC m=+977.671635819" lastFinishedPulling="2025-11-25 09:20:32.271800325 +0000 UTC m=+1027.325440093" observedRunningTime="2025-11-25 09:20:33.654257519 +0000 UTC m=+1028.707897247" 
watchObservedRunningTime="2025-11-25 09:20:33.658562767 +0000 UTC m=+1028.712202525" Nov 25 09:20:33 crc kubenswrapper[4687]: I1125 09:20:33.690137 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5" podStartSLOduration=4.55929946 podStartE2EDuration="53.69011298s" podCreationTimestamp="2025-11-25 09:19:40 +0000 UTC" firstStartedPulling="2025-11-25 09:19:43.099331666 +0000 UTC m=+978.152971384" lastFinishedPulling="2025-11-25 09:20:32.230145146 +0000 UTC m=+1027.283784904" observedRunningTime="2025-11-25 09:20:33.685760391 +0000 UTC m=+1028.739400179" watchObservedRunningTime="2025-11-25 09:20:33.69011298 +0000 UTC m=+1028.743752708" Nov 25 09:20:41 crc kubenswrapper[4687]: I1125 09:20:41.147866 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-qzdnw" Nov 25 09:20:41 crc kubenswrapper[4687]: I1125 09:20:41.213411 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-z59ft" Nov 25 09:20:41 crc kubenswrapper[4687]: I1125 09:20:41.582258 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-864885998-ll9p9" Nov 25 09:20:42 crc kubenswrapper[4687]: I1125 09:20:42.367181 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-w2vc9" Nov 25 09:20:42 crc kubenswrapper[4687]: I1125 09:20:42.859657 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5" Nov 25 09:20:53 crc kubenswrapper[4687]: I1125 09:20:53.845067 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:20:53 crc kubenswrapper[4687]: I1125 09:20:53.845678 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:20:53 crc kubenswrapper[4687]: I1125 09:20:53.845721 4687 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:20:53 crc kubenswrapper[4687]: I1125 09:20:53.846298 4687 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"775e1f554d9dd2a0b079c1ff7e2f05e88c335de1a345eef583910fe573bfcecf"} pod="openshift-machine-config-operator/machine-config-daemon-vcqct" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:20:53 crc kubenswrapper[4687]: I1125 09:20:53.846363 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" 
containerID="cri-o://775e1f554d9dd2a0b079c1ff7e2f05e88c335de1a345eef583910fe573bfcecf" gracePeriod=600 Nov 25 09:20:54 crc kubenswrapper[4687]: I1125 09:20:54.757099 4687 generic.go:334] "Generic (PLEG): container finished" podID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerID="775e1f554d9dd2a0b079c1ff7e2f05e88c335de1a345eef583910fe573bfcecf" exitCode=0 Nov 25 09:20:54 crc kubenswrapper[4687]: I1125 09:20:54.757165 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerDied","Data":"775e1f554d9dd2a0b079c1ff7e2f05e88c335de1a345eef583910fe573bfcecf"} Nov 25 09:20:54 crc kubenswrapper[4687]: I1125 09:20:54.757790 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerStarted","Data":"0e89ebd4720ffd5c135c8bc00be72ce7345dd6f93bd878517e70d876f94fe463"} Nov 25 09:20:54 crc kubenswrapper[4687]: I1125 09:20:54.757818 4687 scope.go:117] "RemoveContainer" containerID="0b6f3d727e83b272772a62a183625cfadd2675aa7e49dfb9d67bfd134f3394f8" Nov 25 09:20:58 crc kubenswrapper[4687]: I1125 09:20:58.747983 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-8kjgf"] Nov 25 09:20:58 crc kubenswrapper[4687]: I1125 09:20:58.749948 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-8kjgf" Nov 25 09:20:58 crc kubenswrapper[4687]: I1125 09:20:58.754598 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-8c77z" Nov 25 09:20:58 crc kubenswrapper[4687]: I1125 09:20:58.755486 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 25 09:20:58 crc kubenswrapper[4687]: I1125 09:20:58.755642 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 25 09:20:58 crc kubenswrapper[4687]: I1125 09:20:58.755820 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 25 09:20:58 crc kubenswrapper[4687]: I1125 09:20:58.761377 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-8kjgf"] Nov 25 09:20:58 crc kubenswrapper[4687]: I1125 09:20:58.798782 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-jfcnf"] Nov 25 09:20:58 crc kubenswrapper[4687]: I1125 09:20:58.800678 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-jfcnf" Nov 25 09:20:58 crc kubenswrapper[4687]: I1125 09:20:58.807849 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 25 09:20:58 crc kubenswrapper[4687]: I1125 09:20:58.814051 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-jfcnf"] Nov 25 09:20:58 crc kubenswrapper[4687]: I1125 09:20:58.921741 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ad63126-08ac-4c9b-9693-ed498260fd9b-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-jfcnf\" (UID: \"3ad63126-08ac-4c9b-9693-ed498260fd9b\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jfcnf" Nov 25 09:20:58 crc kubenswrapper[4687]: I1125 09:20:58.921992 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ad63126-08ac-4c9b-9693-ed498260fd9b-config\") pod \"dnsmasq-dns-78dd6ddcc-jfcnf\" (UID: \"3ad63126-08ac-4c9b-9693-ed498260fd9b\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jfcnf" Nov 25 09:20:58 crc kubenswrapper[4687]: I1125 09:20:58.922013 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njxds\" (UniqueName: \"kubernetes.io/projected/d69fe00b-6cf3-4c7b-9cd9-6991a107bd21-kube-api-access-njxds\") pod \"dnsmasq-dns-675f4bcbfc-8kjgf\" (UID: \"d69fe00b-6cf3-4c7b-9cd9-6991a107bd21\") " pod="openstack/dnsmasq-dns-675f4bcbfc-8kjgf" Nov 25 09:20:58 crc kubenswrapper[4687]: I1125 09:20:58.922073 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xm8dq\" (UniqueName: \"kubernetes.io/projected/3ad63126-08ac-4c9b-9693-ed498260fd9b-kube-api-access-xm8dq\") pod \"dnsmasq-dns-78dd6ddcc-jfcnf\" (UID: \"3ad63126-08ac-4c9b-9693-ed498260fd9b\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jfcnf" Nov 25 09:20:58 crc kubenswrapper[4687]: I1125 09:20:58.922102 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d69fe00b-6cf3-4c7b-9cd9-6991a107bd21-config\") pod \"dnsmasq-dns-675f4bcbfc-8kjgf\" (UID: \"d69fe00b-6cf3-4c7b-9cd9-6991a107bd21\") " pod="openstack/dnsmasq-dns-675f4bcbfc-8kjgf" Nov 25 09:20:59 crc kubenswrapper[4687]: I1125 09:20:59.023239 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xm8dq\" (UniqueName: \"kubernetes.io/projected/3ad63126-08ac-4c9b-9693-ed498260fd9b-kube-api-access-xm8dq\") pod \"dnsmasq-dns-78dd6ddcc-jfcnf\" (UID: \"3ad63126-08ac-4c9b-9693-ed498260fd9b\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jfcnf" Nov 25 09:20:59 crc kubenswrapper[4687]: I1125 09:20:59.023296 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d69fe00b-6cf3-4c7b-9cd9-6991a107bd21-config\") pod \"dnsmasq-dns-675f4bcbfc-8kjgf\" (UID: \"d69fe00b-6cf3-4c7b-9cd9-6991a107bd21\") " pod="openstack/dnsmasq-dns-675f4bcbfc-8kjgf" Nov 25 09:20:59 crc kubenswrapper[4687]: I1125 09:20:59.023351 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ad63126-08ac-4c9b-9693-ed498260fd9b-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-jfcnf\" (UID: \"3ad63126-08ac-4c9b-9693-ed498260fd9b\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jfcnf" Nov 
25 09:20:59 crc kubenswrapper[4687]: I1125 09:20:59.023380 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ad63126-08ac-4c9b-9693-ed498260fd9b-config\") pod \"dnsmasq-dns-78dd6ddcc-jfcnf\" (UID: \"3ad63126-08ac-4c9b-9693-ed498260fd9b\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jfcnf" Nov 25 09:20:59 crc kubenswrapper[4687]: I1125 09:20:59.023397 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njxds\" (UniqueName: \"kubernetes.io/projected/d69fe00b-6cf3-4c7b-9cd9-6991a107bd21-kube-api-access-njxds\") pod \"dnsmasq-dns-675f4bcbfc-8kjgf\" (UID: \"d69fe00b-6cf3-4c7b-9cd9-6991a107bd21\") " pod="openstack/dnsmasq-dns-675f4bcbfc-8kjgf" Nov 25 09:20:59 crc kubenswrapper[4687]: I1125 09:20:59.024351 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ad63126-08ac-4c9b-9693-ed498260fd9b-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-jfcnf\" (UID: \"3ad63126-08ac-4c9b-9693-ed498260fd9b\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jfcnf" Nov 25 09:20:59 crc kubenswrapper[4687]: I1125 09:20:59.024445 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ad63126-08ac-4c9b-9693-ed498260fd9b-config\") pod \"dnsmasq-dns-78dd6ddcc-jfcnf\" (UID: \"3ad63126-08ac-4c9b-9693-ed498260fd9b\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jfcnf" Nov 25 09:20:59 crc kubenswrapper[4687]: I1125 09:20:59.024545 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d69fe00b-6cf3-4c7b-9cd9-6991a107bd21-config\") pod \"dnsmasq-dns-675f4bcbfc-8kjgf\" (UID: \"d69fe00b-6cf3-4c7b-9cd9-6991a107bd21\") " pod="openstack/dnsmasq-dns-675f4bcbfc-8kjgf" Nov 25 09:20:59 crc kubenswrapper[4687]: I1125 09:20:59.043322 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xm8dq\" (UniqueName: \"kubernetes.io/projected/3ad63126-08ac-4c9b-9693-ed498260fd9b-kube-api-access-xm8dq\") pod \"dnsmasq-dns-78dd6ddcc-jfcnf\" (UID: \"3ad63126-08ac-4c9b-9693-ed498260fd9b\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jfcnf" Nov 25 09:20:59 crc kubenswrapper[4687]: I1125 09:20:59.048196 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njxds\" (UniqueName: \"kubernetes.io/projected/d69fe00b-6cf3-4c7b-9cd9-6991a107bd21-kube-api-access-njxds\") pod \"dnsmasq-dns-675f4bcbfc-8kjgf\" (UID: \"d69fe00b-6cf3-4c7b-9cd9-6991a107bd21\") " pod="openstack/dnsmasq-dns-675f4bcbfc-8kjgf" Nov 25 09:20:59 crc kubenswrapper[4687]: I1125 09:20:59.069460 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-8kjgf" Nov 25 09:20:59 crc kubenswrapper[4687]: I1125 09:20:59.120497 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-jfcnf" Nov 25 09:20:59 crc kubenswrapper[4687]: I1125 09:20:59.511442 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-8kjgf"] Nov 25 09:20:59 crc kubenswrapper[4687]: I1125 09:20:59.514834 4687 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:20:59 crc kubenswrapper[4687]: W1125 09:20:59.585489 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3ad63126_08ac_4c9b_9693_ed498260fd9b.slice/crio-5dd45ad10548cc00e3bdb58539e5d3f42dddc2877c2d0706677645316631c7e3 WatchSource:0}: Error finding container 5dd45ad10548cc00e3bdb58539e5d3f42dddc2877c2d0706677645316631c7e3: Status 404 returned error can't find the container with id 5dd45ad10548cc00e3bdb58539e5d3f42dddc2877c2d0706677645316631c7e3 Nov 25 09:20:59 crc kubenswrapper[4687]: I1125 09:20:59.585817 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-jfcnf"] Nov 25 09:20:59 crc kubenswrapper[4687]: I1125 09:20:59.793987 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-jfcnf" event={"ID":"3ad63126-08ac-4c9b-9693-ed498260fd9b","Type":"ContainerStarted","Data":"5dd45ad10548cc00e3bdb58539e5d3f42dddc2877c2d0706677645316631c7e3"} Nov 25 09:20:59 crc kubenswrapper[4687]: I1125 09:20:59.795966 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-8kjgf" event={"ID":"d69fe00b-6cf3-4c7b-9cd9-6991a107bd21","Type":"ContainerStarted","Data":"c5bb436e5ab4e16823677f1613da8fbb42b42be37e9cfd08fffe2081a1ededbc"} Nov 25 09:21:01 crc kubenswrapper[4687]: I1125 09:21:01.905108 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-8kjgf"] Nov 25 09:21:01 crc kubenswrapper[4687]: I1125 09:21:01.927984 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-7snmr"] Nov 25 09:21:01 crc kubenswrapper[4687]: I1125 09:21:01.933940 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-7snmr" Nov 25 09:21:01 crc kubenswrapper[4687]: I1125 09:21:01.936836 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-7snmr"] Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.072596 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvkcj\" (UniqueName: \"kubernetes.io/projected/b48297c6-fda2-4f05-aee3-510ac3d2d03b-kube-api-access-hvkcj\") pod \"dnsmasq-dns-666b6646f7-7snmr\" (UID: \"b48297c6-fda2-4f05-aee3-510ac3d2d03b\") " pod="openstack/dnsmasq-dns-666b6646f7-7snmr" Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.072640 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b48297c6-fda2-4f05-aee3-510ac3d2d03b-config\") pod \"dnsmasq-dns-666b6646f7-7snmr\" (UID: \"b48297c6-fda2-4f05-aee3-510ac3d2d03b\") " pod="openstack/dnsmasq-dns-666b6646f7-7snmr" Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.072672 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b48297c6-fda2-4f05-aee3-510ac3d2d03b-dns-svc\") pod \"dnsmasq-dns-666b6646f7-7snmr\" (UID: \"b48297c6-fda2-4f05-aee3-510ac3d2d03b\") " pod="openstack/dnsmasq-dns-666b6646f7-7snmr" Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.173972 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvkcj\" (UniqueName: \"kubernetes.io/projected/b48297c6-fda2-4f05-aee3-510ac3d2d03b-kube-api-access-hvkcj\") pod \"dnsmasq-dns-666b6646f7-7snmr\" (UID: \"b48297c6-fda2-4f05-aee3-510ac3d2d03b\") " pod="openstack/dnsmasq-dns-666b6646f7-7snmr" Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.174012 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b48297c6-fda2-4f05-aee3-510ac3d2d03b-config\") pod \"dnsmasq-dns-666b6646f7-7snmr\" (UID: \"b48297c6-fda2-4f05-aee3-510ac3d2d03b\") " pod="openstack/dnsmasq-dns-666b6646f7-7snmr" Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.174039 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b48297c6-fda2-4f05-aee3-510ac3d2d03b-dns-svc\") pod \"dnsmasq-dns-666b6646f7-7snmr\" (UID: \"b48297c6-fda2-4f05-aee3-510ac3d2d03b\") " pod="openstack/dnsmasq-dns-666b6646f7-7snmr" Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.174915 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b48297c6-fda2-4f05-aee3-510ac3d2d03b-dns-svc\") pod \"dnsmasq-dns-666b6646f7-7snmr\" (UID: \"b48297c6-fda2-4f05-aee3-510ac3d2d03b\") " pod="openstack/dnsmasq-dns-666b6646f7-7snmr" Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.175730 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b48297c6-fda2-4f05-aee3-510ac3d2d03b-config\") pod \"dnsmasq-dns-666b6646f7-7snmr\" (UID: \"b48297c6-fda2-4f05-aee3-510ac3d2d03b\") " pod="openstack/dnsmasq-dns-666b6646f7-7snmr" Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.206494 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-jfcnf"] Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.231465 
4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvkcj\" (UniqueName: \"kubernetes.io/projected/b48297c6-fda2-4f05-aee3-510ac3d2d03b-kube-api-access-hvkcj\") pod \"dnsmasq-dns-666b6646f7-7snmr\" (UID: \"b48297c6-fda2-4f05-aee3-510ac3d2d03b\") " pod="openstack/dnsmasq-dns-666b6646f7-7snmr" Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.258643 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-mtcxf"] Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.260262 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.281802 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-7snmr" Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.300321 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-mtcxf"] Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.377413 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spz9h\" (UniqueName: \"kubernetes.io/projected/a4c3e61a-c02f-410d-aed1-76fe207f46c5-kube-api-access-spz9h\") pod \"dnsmasq-dns-57d769cc4f-mtcxf\" (UID: \"a4c3e61a-c02f-410d-aed1-76fe207f46c5\") " pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.377579 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4c3e61a-c02f-410d-aed1-76fe207f46c5-config\") pod \"dnsmasq-dns-57d769cc4f-mtcxf\" (UID: \"a4c3e61a-c02f-410d-aed1-76fe207f46c5\") " pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.377636 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a4c3e61a-c02f-410d-aed1-76fe207f46c5-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-mtcxf\" (UID: \"a4c3e61a-c02f-410d-aed1-76fe207f46c5\") " pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.485254 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spz9h\" (UniqueName: \"kubernetes.io/projected/a4c3e61a-c02f-410d-aed1-76fe207f46c5-kube-api-access-spz9h\") pod \"dnsmasq-dns-57d769cc4f-mtcxf\" (UID: \"a4c3e61a-c02f-410d-aed1-76fe207f46c5\") " pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.485537 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4c3e61a-c02f-410d-aed1-76fe207f46c5-config\") pod \"dnsmasq-dns-57d769cc4f-mtcxf\" (UID: \"a4c3e61a-c02f-410d-aed1-76fe207f46c5\") " pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.485574 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a4c3e61a-c02f-410d-aed1-76fe207f46c5-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-mtcxf\" (UID: \"a4c3e61a-c02f-410d-aed1-76fe207f46c5\") " pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.486496 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/a4c3e61a-c02f-410d-aed1-76fe207f46c5-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-mtcxf\" (UID: \"a4c3e61a-c02f-410d-aed1-76fe207f46c5\") " pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.486495 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4c3e61a-c02f-410d-aed1-76fe207f46c5-config\") pod \"dnsmasq-dns-57d769cc4f-mtcxf\" (UID: \"a4c3e61a-c02f-410d-aed1-76fe207f46c5\") " pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.512951 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spz9h\" (UniqueName: \"kubernetes.io/projected/a4c3e61a-c02f-410d-aed1-76fe207f46c5-kube-api-access-spz9h\") pod \"dnsmasq-dns-57d769cc4f-mtcxf\" (UID: \"a4c3e61a-c02f-410d-aed1-76fe207f46c5\") " pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.591222 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.673186 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-7snmr"] Nov 25 09:21:02 crc kubenswrapper[4687]: W1125 09:21:02.679994 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb48297c6_fda2_4f05_aee3_510ac3d2d03b.slice/crio-7c6f51274254f05aae5a8ec1542c8fb6d838d7f576dab44d8c33ca6ab9122e06 WatchSource:0}: Error finding container 7c6f51274254f05aae5a8ec1542c8fb6d838d7f576dab44d8c33ca6ab9122e06: Status 404 returned error can't find the container with id 7c6f51274254f05aae5a8ec1542c8fb6d838d7f576dab44d8c33ca6ab9122e06 Nov 25 09:21:02 crc kubenswrapper[4687]: I1125 09:21:02.874328 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-7snmr" event={"ID":"b48297c6-fda2-4f05-aee3-510ac3d2d03b","Type":"ContainerStarted","Data":"7c6f51274254f05aae5a8ec1542c8fb6d838d7f576dab44d8c33ca6ab9122e06"} Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.093112 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.099474 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.101714 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.101975 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.102381 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.102587 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-mnbpd" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.102850 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.102944 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.103136 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.111701 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.122141 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-mtcxf"] Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.207628 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.207996 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.208078 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.208170 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.208203 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-pod-info\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.209432 4687 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.209465 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.210214 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-server-conf\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.210277 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klxh2\" (UniqueName: \"kubernetes.io/projected/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-kube-api-access-klxh2\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.210319 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.210397 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-config-data\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.311665 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-config-data\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.311719 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.311759 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.311789 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.311827 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.311856 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-pod-info\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.311913 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.311993 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.312023 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-server-conf\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.312051 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klxh2\" (UniqueName: \"kubernetes.io/projected/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-kube-api-access-klxh2\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.312073 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.313400 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.314407 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-config-data\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 
09:21:03.314920 4687 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.315170 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.317290 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-server-conf\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.319230 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.321145 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.330276 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-klxh2\" (UniqueName: \"kubernetes.io/projected/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-kube-api-access-klxh2\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.336089 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.355063 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-pod-info\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.355536 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.358169 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " pod="openstack/rabbitmq-server-0" Nov 25 
09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.407365 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.408602 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.417374 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.417910 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.417924 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.417927 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.418109 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-94z4c" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.418145 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.421923 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.421925 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.426580 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.514716 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.514891 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.514915 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.514941 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.514958 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.514985 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.515013 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.515036 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.515054 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9v9wz\" (UniqueName: \"kubernetes.io/projected/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-kube-api-access-9v9wz\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.515073 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.515090 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.617280 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.617939 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9v9wz\" (UniqueName: \"kubernetes.io/projected/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-kube-api-access-9v9wz\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.617972 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.618009 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.618037 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.618080 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.618099 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 
25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.618125 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.618143 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.618173 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.618318 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.619068 4687 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.620994 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.622775 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.623297 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.623687 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.623798 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" 
(UniqueName: \"kubernetes.io/configmap/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.628523 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.632040 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.634000 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.637170 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.653260 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9v9wz\" (UniqueName: \"kubernetes.io/projected/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-kube-api-access-9v9wz\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.690724 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:03 crc kubenswrapper[4687]: I1125 09:21:03.738960 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:21:04 crc kubenswrapper[4687]: I1125 09:21:04.887900 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 25 09:21:04 crc kubenswrapper[4687]: I1125 09:21:04.893709 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 25 09:21:04 crc kubenswrapper[4687]: I1125 09:21:04.899296 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 25 09:21:04 crc kubenswrapper[4687]: I1125 09:21:04.899642 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 25 09:21:04 crc kubenswrapper[4687]: I1125 09:21:04.899652 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 25 09:21:04 crc kubenswrapper[4687]: I1125 09:21:04.899852 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-sw5tf" Nov 25 09:21:04 crc kubenswrapper[4687]: I1125 09:21:04.899910 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 25 09:21:04 crc kubenswrapper[4687]: I1125 09:21:04.904444 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.041195 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7-config-data-generated\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.041248 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7-config-data-default\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.041323 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.041364 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dp86z\" (UniqueName: \"kubernetes.io/projected/f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7-kube-api-access-dp86z\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.041409 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7-kolla-config\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.041441 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.041462 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7-operator-scripts\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.041531 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.142861 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7-config-data-generated\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.142906 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7-config-data-default\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.142956 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.142987 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dp86z\" (UniqueName: \"kubernetes.io/projected/f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7-kube-api-access-dp86z\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.143019 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7-kolla-config\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.143043 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.143060 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7-operator-scripts\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.143077 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: 
\"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.143347 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7-config-data-generated\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.143905 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7-kolla-config\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.143901 4687 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.144866 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7-operator-scripts\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.145268 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7-config-data-default\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.147391 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.161019 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dp86z\" (UniqueName: \"kubernetes.io/projected/f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7-kube-api-access-dp86z\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.162244 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.163677 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7\") " pod="openstack/openstack-galera-0" Nov 25 09:21:05 crc kubenswrapper[4687]: I1125 09:21:05.245197 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.336429 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.337582 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.339995 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.340982 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.341187 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.341362 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-wzc62" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.356991 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.467313 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjl25\" (UniqueName: \"kubernetes.io/projected/b1c0236e-917b-4c65-a9b7-6d3508c1f4a8-kube-api-access-sjl25\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.467380 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1c0236e-917b-4c65-a9b7-6d3508c1f4a8-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.467410 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b1c0236e-917b-4c65-a9b7-6d3508c1f4a8-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.467482 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1c0236e-917b-4c65-a9b7-6d3508c1f4a8-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.467529 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.467561 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1c0236e-917b-4c65-a9b7-6d3508c1f4a8-galera-tls-certs\") pod 
\"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.467595 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b1c0236e-917b-4c65-a9b7-6d3508c1f4a8-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.467642 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b1c0236e-917b-4c65-a9b7-6d3508c1f4a8-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.569221 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjl25\" (UniqueName: \"kubernetes.io/projected/b1c0236e-917b-4c65-a9b7-6d3508c1f4a8-kube-api-access-sjl25\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.569276 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1c0236e-917b-4c65-a9b7-6d3508c1f4a8-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.569300 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b1c0236e-917b-4c65-a9b7-6d3508c1f4a8-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.569334 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1c0236e-917b-4c65-a9b7-6d3508c1f4a8-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.569359 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.569383 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1c0236e-917b-4c65-a9b7-6d3508c1f4a8-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.569410 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b1c0236e-917b-4c65-a9b7-6d3508c1f4a8-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: 
\"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.569447 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b1c0236e-917b-4c65-a9b7-6d3508c1f4a8-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.569573 4687 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.570292 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b1c0236e-917b-4c65-a9b7-6d3508c1f4a8-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.570413 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b1c0236e-917b-4c65-a9b7-6d3508c1f4a8-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.570790 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b1c0236e-917b-4c65-a9b7-6d3508c1f4a8-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.571122 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1c0236e-917b-4c65-a9b7-6d3508c1f4a8-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.576256 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1c0236e-917b-4c65-a9b7-6d3508c1f4a8-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.577433 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1c0236e-917b-4c65-a9b7-6d3508c1f4a8-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.589901 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjl25\" (UniqueName: \"kubernetes.io/projected/b1c0236e-917b-4c65-a9b7-6d3508c1f4a8-kube-api-access-sjl25\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc 
kubenswrapper[4687]: I1125 09:21:06.590399 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.595340 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.597046 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.597759 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-zlgkc" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.597776 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.607074 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.607350 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.668714 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.670458 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgdql\" (UniqueName: \"kubernetes.io/projected/8b16fcc2-1dd1-47d5-979a-f50611173736-kube-api-access-zgdql\") pod \"memcached-0\" (UID: \"8b16fcc2-1dd1-47d5-979a-f50611173736\") " pod="openstack/memcached-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.670605 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8b16fcc2-1dd1-47d5-979a-f50611173736-config-data\") pod \"memcached-0\" (UID: \"8b16fcc2-1dd1-47d5-979a-f50611173736\") " pod="openstack/memcached-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.670761 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b16fcc2-1dd1-47d5-979a-f50611173736-combined-ca-bundle\") pod \"memcached-0\" (UID: \"8b16fcc2-1dd1-47d5-979a-f50611173736\") " pod="openstack/memcached-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.670885 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/8b16fcc2-1dd1-47d5-979a-f50611173736-memcached-tls-certs\") pod \"memcached-0\" (UID: \"8b16fcc2-1dd1-47d5-979a-f50611173736\") " pod="openstack/memcached-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.670955 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8b16fcc2-1dd1-47d5-979a-f50611173736-kolla-config\") pod \"memcached-0\" (UID: \"8b16fcc2-1dd1-47d5-979a-f50611173736\") " pod="openstack/memcached-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.772451 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/8b16fcc2-1dd1-47d5-979a-f50611173736-config-data\") pod \"memcached-0\" (UID: \"8b16fcc2-1dd1-47d5-979a-f50611173736\") " pod="openstack/memcached-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.772574 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b16fcc2-1dd1-47d5-979a-f50611173736-combined-ca-bundle\") pod \"memcached-0\" (UID: \"8b16fcc2-1dd1-47d5-979a-f50611173736\") " pod="openstack/memcached-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.772649 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/8b16fcc2-1dd1-47d5-979a-f50611173736-memcached-tls-certs\") pod \"memcached-0\" (UID: \"8b16fcc2-1dd1-47d5-979a-f50611173736\") " pod="openstack/memcached-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.772688 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8b16fcc2-1dd1-47d5-979a-f50611173736-kolla-config\") pod \"memcached-0\" (UID: \"8b16fcc2-1dd1-47d5-979a-f50611173736\") " pod="openstack/memcached-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.772745 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgdql\" (UniqueName: \"kubernetes.io/projected/8b16fcc2-1dd1-47d5-979a-f50611173736-kube-api-access-zgdql\") pod \"memcached-0\" (UID: \"8b16fcc2-1dd1-47d5-979a-f50611173736\") " pod="openstack/memcached-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.773529 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8b16fcc2-1dd1-47d5-979a-f50611173736-kolla-config\") pod \"memcached-0\" (UID: \"8b16fcc2-1dd1-47d5-979a-f50611173736\") " pod="openstack/memcached-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.773906 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8b16fcc2-1dd1-47d5-979a-f50611173736-config-data\") pod \"memcached-0\" (UID: \"8b16fcc2-1dd1-47d5-979a-f50611173736\") " pod="openstack/memcached-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.776927 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/8b16fcc2-1dd1-47d5-979a-f50611173736-memcached-tls-certs\") pod \"memcached-0\" (UID: \"8b16fcc2-1dd1-47d5-979a-f50611173736\") " pod="openstack/memcached-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.777484 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b16fcc2-1dd1-47d5-979a-f50611173736-combined-ca-bundle\") pod \"memcached-0\" (UID: \"8b16fcc2-1dd1-47d5-979a-f50611173736\") " pod="openstack/memcached-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.792042 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgdql\" (UniqueName: \"kubernetes.io/projected/8b16fcc2-1dd1-47d5-979a-f50611173736-kube-api-access-zgdql\") pod \"memcached-0\" (UID: \"8b16fcc2-1dd1-47d5-979a-f50611173736\") " pod="openstack/memcached-0" Nov 25 09:21:06 crc kubenswrapper[4687]: I1125 09:21:06.984352 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 25 09:21:08 crc kubenswrapper[4687]: I1125 09:21:08.535404 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 09:21:08 crc kubenswrapper[4687]: I1125 09:21:08.537652 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 09:21:08 crc kubenswrapper[4687]: I1125 09:21:08.539864 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-l26jd" Nov 25 09:21:08 crc kubenswrapper[4687]: I1125 09:21:08.550011 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 09:21:08 crc kubenswrapper[4687]: I1125 09:21:08.596342 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grtzq\" (UniqueName: \"kubernetes.io/projected/1c8dccfc-e66b-4a60-b0c4-d7c94652dd12-kube-api-access-grtzq\") pod \"kube-state-metrics-0\" (UID: \"1c8dccfc-e66b-4a60-b0c4-d7c94652dd12\") " pod="openstack/kube-state-metrics-0" Nov 25 09:21:08 crc kubenswrapper[4687]: I1125 09:21:08.697384 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grtzq\" (UniqueName: \"kubernetes.io/projected/1c8dccfc-e66b-4a60-b0c4-d7c94652dd12-kube-api-access-grtzq\") pod \"kube-state-metrics-0\" (UID: \"1c8dccfc-e66b-4a60-b0c4-d7c94652dd12\") " pod="openstack/kube-state-metrics-0" Nov 25 09:21:08 crc kubenswrapper[4687]: I1125 09:21:08.718274 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-grtzq\" (UniqueName: \"kubernetes.io/projected/1c8dccfc-e66b-4a60-b0c4-d7c94652dd12-kube-api-access-grtzq\") pod \"kube-state-metrics-0\" (UID: \"1c8dccfc-e66b-4a60-b0c4-d7c94652dd12\") " pod="openstack/kube-state-metrics-0" Nov 25 09:21:08 crc kubenswrapper[4687]: I1125 09:21:08.855074 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 09:21:09 crc kubenswrapper[4687]: I1125 09:21:09.921540 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" event={"ID":"a4c3e61a-c02f-410d-aed1-76fe207f46c5","Type":"ContainerStarted","Data":"b89abe4e8fb0f5c12517cea4eb2a81aeb10246cc0d8d2b169564e976fe428735"} Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.326160 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-fdpzn"] Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.327913 4687 util.go:30] "No sandbox for pod can be found. 
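
[annotation] The mount entries above follow the kubelet's three-phase volume reconciliation: operationExecutor.VerifyControllerAttachedVolume (reconciler_common.go:245) confirms the attach state, MountVolume.MountDevice (operation_generator.go:580) stages device-backed volumes at a global path (only local-storage03-crc gets this step, at /mnt/openstack/pv03), and MountVolume.SetUp (operation_generator.go:637) materializes each volume into the pod directory. A minimal Go sketch of that ordering, using simplified stand-in types rather than kubelet's real ones, with the openstack-cell1-galera-0 volumes from the log:

    package main

    import "fmt"

    // volume is a simplified stand-in for kubelet's volume spec; the
    // needsDevice flag mirrors which volumes above get a MountDevice step.
    type volume struct {
        name        string
        plugin      string
        needsDevice bool // local/block volumes are staged globally first
    }

    func reconcile(pod string, vols []volume) {
        for _, v := range vols {
            // Phase 1: confirm the controller reports the volume attached.
            fmt.Printf("VerifyControllerAttachedVolume: %s (%s)\n", v.name, v.plugin)
            if v.needsDevice {
                // Phase 2: stage device-backed volumes at a global mount
                // path (e.g. /mnt/openstack/pv03 in the log).
                fmt.Printf("MountVolume.MountDevice: %s\n", v.name)
            }
            // Phase 3: bind the volume into the pod's volumes directory.
            fmt.Printf("MountVolume.SetUp: %s for %s\n", v.name, pod)
        }
    }

    func main() {
        reconcile("openstack/openstack-cell1-galera-0", []volume{
            {"kolla-config", "kubernetes.io/configmap", false},
            {"combined-ca-bundle", "kubernetes.io/secret", false},
            {"local-storage03-crc", "kubernetes.io/local-volume", true},
        })
    }
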
Need to start a new one" pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.332690 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.333068 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.335395 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-7hqqt" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.341130 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-fdpzn"] Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.350146 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-hstvx"] Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.353045 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.353811 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6894bad0-9f1e-4d44-89a3-b06c6b24495a-var-run\") pod \"ovn-controller-fdpzn\" (UID: \"6894bad0-9f1e-4d44-89a3-b06c6b24495a\") " pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.353843 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ec3fcef6-578a-4687-9af6-18d6de32f1e1-scripts\") pod \"ovn-controller-ovs-hstvx\" (UID: \"ec3fcef6-578a-4687-9af6-18d6de32f1e1\") " pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.353861 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ec3fcef6-578a-4687-9af6-18d6de32f1e1-var-run\") pod \"ovn-controller-ovs-hstvx\" (UID: \"ec3fcef6-578a-4687-9af6-18d6de32f1e1\") " pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.353877 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6894bad0-9f1e-4d44-89a3-b06c6b24495a-var-run-ovn\") pod \"ovn-controller-fdpzn\" (UID: \"6894bad0-9f1e-4d44-89a3-b06c6b24495a\") " pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.353903 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vx7t\" (UniqueName: \"kubernetes.io/projected/ec3fcef6-578a-4687-9af6-18d6de32f1e1-kube-api-access-2vx7t\") pod \"ovn-controller-ovs-hstvx\" (UID: \"ec3fcef6-578a-4687-9af6-18d6de32f1e1\") " pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.353977 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6894bad0-9f1e-4d44-89a3-b06c6b24495a-scripts\") pod \"ovn-controller-fdpzn\" (UID: \"6894bad0-9f1e-4d44-89a3-b06c6b24495a\") " pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.353992 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6894bad0-9f1e-4d44-89a3-b06c6b24495a-combined-ca-bundle\") pod \"ovn-controller-fdpzn\" (UID: \"6894bad0-9f1e-4d44-89a3-b06c6b24495a\") " pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.354012 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ec3fcef6-578a-4687-9af6-18d6de32f1e1-var-log\") pod \"ovn-controller-ovs-hstvx\" (UID: \"ec3fcef6-578a-4687-9af6-18d6de32f1e1\") " pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.354032 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zg8b\" (UniqueName: \"kubernetes.io/projected/6894bad0-9f1e-4d44-89a3-b06c6b24495a-kube-api-access-2zg8b\") pod \"ovn-controller-fdpzn\" (UID: \"6894bad0-9f1e-4d44-89a3-b06c6b24495a\") " pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.354050 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ec3fcef6-578a-4687-9af6-18d6de32f1e1-var-lib\") pod \"ovn-controller-ovs-hstvx\" (UID: \"ec3fcef6-578a-4687-9af6-18d6de32f1e1\") " pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.354066 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6894bad0-9f1e-4d44-89a3-b06c6b24495a-ovn-controller-tls-certs\") pod \"ovn-controller-fdpzn\" (UID: \"6894bad0-9f1e-4d44-89a3-b06c6b24495a\") " pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.354085 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6894bad0-9f1e-4d44-89a3-b06c6b24495a-var-log-ovn\") pod \"ovn-controller-fdpzn\" (UID: \"6894bad0-9f1e-4d44-89a3-b06c6b24495a\") " pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.354104 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ec3fcef6-578a-4687-9af6-18d6de32f1e1-etc-ovs\") pod \"ovn-controller-ovs-hstvx\" (UID: \"ec3fcef6-578a-4687-9af6-18d6de32f1e1\") " pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.361945 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-hstvx"] Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.455414 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6894bad0-9f1e-4d44-89a3-b06c6b24495a-var-run-ovn\") pod \"ovn-controller-fdpzn\" (UID: \"6894bad0-9f1e-4d44-89a3-b06c6b24495a\") " pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.455470 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vx7t\" (UniqueName: \"kubernetes.io/projected/ec3fcef6-578a-4687-9af6-18d6de32f1e1-kube-api-access-2vx7t\") pod \"ovn-controller-ovs-hstvx\" (UID: \"ec3fcef6-578a-4687-9af6-18d6de32f1e1\") " pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:12 crc 
kubenswrapper[4687]: I1125 09:21:12.455531 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6894bad0-9f1e-4d44-89a3-b06c6b24495a-scripts\") pod \"ovn-controller-fdpzn\" (UID: \"6894bad0-9f1e-4d44-89a3-b06c6b24495a\") " pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.455551 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6894bad0-9f1e-4d44-89a3-b06c6b24495a-combined-ca-bundle\") pod \"ovn-controller-fdpzn\" (UID: \"6894bad0-9f1e-4d44-89a3-b06c6b24495a\") " pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.455572 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ec3fcef6-578a-4687-9af6-18d6de32f1e1-var-log\") pod \"ovn-controller-ovs-hstvx\" (UID: \"ec3fcef6-578a-4687-9af6-18d6de32f1e1\") " pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.455592 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zg8b\" (UniqueName: \"kubernetes.io/projected/6894bad0-9f1e-4d44-89a3-b06c6b24495a-kube-api-access-2zg8b\") pod \"ovn-controller-fdpzn\" (UID: \"6894bad0-9f1e-4d44-89a3-b06c6b24495a\") " pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.455610 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ec3fcef6-578a-4687-9af6-18d6de32f1e1-var-lib\") pod \"ovn-controller-ovs-hstvx\" (UID: \"ec3fcef6-578a-4687-9af6-18d6de32f1e1\") " pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.455627 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6894bad0-9f1e-4d44-89a3-b06c6b24495a-ovn-controller-tls-certs\") pod \"ovn-controller-fdpzn\" (UID: \"6894bad0-9f1e-4d44-89a3-b06c6b24495a\") " pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.455645 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6894bad0-9f1e-4d44-89a3-b06c6b24495a-var-log-ovn\") pod \"ovn-controller-fdpzn\" (UID: \"6894bad0-9f1e-4d44-89a3-b06c6b24495a\") " pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.455665 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ec3fcef6-578a-4687-9af6-18d6de32f1e1-etc-ovs\") pod \"ovn-controller-ovs-hstvx\" (UID: \"ec3fcef6-578a-4687-9af6-18d6de32f1e1\") " pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.455711 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6894bad0-9f1e-4d44-89a3-b06c6b24495a-var-run\") pod \"ovn-controller-fdpzn\" (UID: \"6894bad0-9f1e-4d44-89a3-b06c6b24495a\") " pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.455727 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/ec3fcef6-578a-4687-9af6-18d6de32f1e1-scripts\") pod \"ovn-controller-ovs-hstvx\" (UID: \"ec3fcef6-578a-4687-9af6-18d6de32f1e1\") " pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.455749 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ec3fcef6-578a-4687-9af6-18d6de32f1e1-var-run\") pod \"ovn-controller-ovs-hstvx\" (UID: \"ec3fcef6-578a-4687-9af6-18d6de32f1e1\") " pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.456655 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ec3fcef6-578a-4687-9af6-18d6de32f1e1-var-run\") pod \"ovn-controller-ovs-hstvx\" (UID: \"ec3fcef6-578a-4687-9af6-18d6de32f1e1\") " pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.456663 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6894bad0-9f1e-4d44-89a3-b06c6b24495a-var-run-ovn\") pod \"ovn-controller-fdpzn\" (UID: \"6894bad0-9f1e-4d44-89a3-b06c6b24495a\") " pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.456898 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ec3fcef6-578a-4687-9af6-18d6de32f1e1-etc-ovs\") pod \"ovn-controller-ovs-hstvx\" (UID: \"ec3fcef6-578a-4687-9af6-18d6de32f1e1\") " pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.457029 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6894bad0-9f1e-4d44-89a3-b06c6b24495a-var-log-ovn\") pod \"ovn-controller-fdpzn\" (UID: \"6894bad0-9f1e-4d44-89a3-b06c6b24495a\") " pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.457114 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ec3fcef6-578a-4687-9af6-18d6de32f1e1-var-log\") pod \"ovn-controller-ovs-hstvx\" (UID: \"ec3fcef6-578a-4687-9af6-18d6de32f1e1\") " pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.457545 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6894bad0-9f1e-4d44-89a3-b06c6b24495a-var-run\") pod \"ovn-controller-fdpzn\" (UID: \"6894bad0-9f1e-4d44-89a3-b06c6b24495a\") " pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.457575 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ec3fcef6-578a-4687-9af6-18d6de32f1e1-var-lib\") pod \"ovn-controller-ovs-hstvx\" (UID: \"ec3fcef6-578a-4687-9af6-18d6de32f1e1\") " pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.458876 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6894bad0-9f1e-4d44-89a3-b06c6b24495a-scripts\") pod \"ovn-controller-fdpzn\" (UID: \"6894bad0-9f1e-4d44-89a3-b06c6b24495a\") " pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.459692 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ec3fcef6-578a-4687-9af6-18d6de32f1e1-scripts\") pod \"ovn-controller-ovs-hstvx\" (UID: \"ec3fcef6-578a-4687-9af6-18d6de32f1e1\") " pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.466627 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6894bad0-9f1e-4d44-89a3-b06c6b24495a-ovn-controller-tls-certs\") pod \"ovn-controller-fdpzn\" (UID: \"6894bad0-9f1e-4d44-89a3-b06c6b24495a\") " pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.476884 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vx7t\" (UniqueName: \"kubernetes.io/projected/ec3fcef6-578a-4687-9af6-18d6de32f1e1-kube-api-access-2vx7t\") pod \"ovn-controller-ovs-hstvx\" (UID: \"ec3fcef6-578a-4687-9af6-18d6de32f1e1\") " pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.482224 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zg8b\" (UniqueName: \"kubernetes.io/projected/6894bad0-9f1e-4d44-89a3-b06c6b24495a-kube-api-access-2zg8b\") pod \"ovn-controller-fdpzn\" (UID: \"6894bad0-9f1e-4d44-89a3-b06c6b24495a\") " pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.483597 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6894bad0-9f1e-4d44-89a3-b06c6b24495a-combined-ca-bundle\") pod \"ovn-controller-fdpzn\" (UID: \"6894bad0-9f1e-4d44-89a3-b06c6b24495a\") " pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.599891 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.601723 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.604479 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.605027 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.605174 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.605223 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-2jvjk" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.605418 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.612931 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.656450 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.666711 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.674474 4687 util.go:30] "No sandbox for pod can be found. 
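
[annotation] The reflector.go "Caches populated" lines above show the kubelet waiting for local caches of exactly the Secrets and ConfigMaps a new pod references (e.g. cert-ovndbcluster-nb-ovndbs, ovndbcluster-nb-config) before volume setup can read them. The kubelet itself uses filtered single-object watches, not a namespace-wide informer; the sketch below shows the same populate-then-sync idea with a namespace-scoped client-go informer. The kubeconfig path is a placeholder assumption:

    package main

    import (
        "context"
        "fmt"

        "k8s.io/client-go/informers"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/cache"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Assumption: a kubeconfig at this path; adjust for your environment.
        cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config")
        if err != nil {
            panic(err)
        }
        cs, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }

        // Namespace-scoped informer for Secrets in "openstack".
        factory := informers.NewSharedInformerFactoryWithOptions(
            cs, 0, informers.WithNamespace("openstack"))
        secrets := factory.Core().V1().Secrets().Informer()

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()
        factory.Start(ctx.Done())
        if !cache.WaitForCacheSync(ctx.Done(), secrets.HasSynced) {
            panic("secret cache never synced")
        }
        fmt.Println("caches populated; volume SetUp can read secret data locally")
    }
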
Need to start a new one" pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.761336 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.761393 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9d20bc24-507c-4712-8c05-c8d3cfd4e87f-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.761416 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d20bc24-507c-4712-8c05-c8d3cfd4e87f-config\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.761462 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d20bc24-507c-4712-8c05-c8d3cfd4e87f-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.761488 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d20bc24-507c-4712-8c05-c8d3cfd4e87f-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.761542 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8sc7\" (UniqueName: \"kubernetes.io/projected/9d20bc24-507c-4712-8c05-c8d3cfd4e87f-kube-api-access-z8sc7\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.761588 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d20bc24-507c-4712-8c05-c8d3cfd4e87f-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.761625 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9d20bc24-507c-4712-8c05-c8d3cfd4e87f-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.771091 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.867581 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: 
\"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.867641 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9d20bc24-507c-4712-8c05-c8d3cfd4e87f-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.867664 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d20bc24-507c-4712-8c05-c8d3cfd4e87f-config\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.867720 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d20bc24-507c-4712-8c05-c8d3cfd4e87f-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.867751 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d20bc24-507c-4712-8c05-c8d3cfd4e87f-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.867815 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8sc7\" (UniqueName: \"kubernetes.io/projected/9d20bc24-507c-4712-8c05-c8d3cfd4e87f-kube-api-access-z8sc7\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.867871 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d20bc24-507c-4712-8c05-c8d3cfd4e87f-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.867921 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9d20bc24-507c-4712-8c05-c8d3cfd4e87f-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.870304 4687 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.871796 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9d20bc24-507c-4712-8c05-c8d3cfd4e87f-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.871899 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/9d20bc24-507c-4712-8c05-c8d3cfd4e87f-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.871921 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d20bc24-507c-4712-8c05-c8d3cfd4e87f-config\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.873865 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d20bc24-507c-4712-8c05-c8d3cfd4e87f-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.874213 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d20bc24-507c-4712-8c05-c8d3cfd4e87f-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.874280 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d20bc24-507c-4712-8c05-c8d3cfd4e87f-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.894866 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8sc7\" (UniqueName: \"kubernetes.io/projected/9d20bc24-507c-4712-8c05-c8d3cfd4e87f-kube-api-access-z8sc7\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.907777 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"9d20bc24-507c-4712-8c05-c8d3cfd4e87f\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:12 crc kubenswrapper[4687]: I1125 09:21:12.928218 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.668425 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.670170 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.672329 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.672733 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-8zhmf" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.672780 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.676498 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.676888 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.801496 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zlp9g\" (UniqueName: \"kubernetes.io/projected/ae74a803-c417-4ab8-8842-20a575b77dd3-kube-api-access-zlp9g\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.801719 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae74a803-c417-4ab8-8842-20a575b77dd3-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.801762 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ae74a803-c417-4ab8-8842-20a575b77dd3-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.801791 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae74a803-c417-4ab8-8842-20a575b77dd3-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.801821 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae74a803-c417-4ab8-8842-20a575b77dd3-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.801947 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae74a803-c417-4ab8-8842-20a575b77dd3-config\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.801975 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: 
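
[annotation] As with local-storage08-crc for ovsdbserver-nb-0 above, the sb-0 local PV local-storage02-crc is first staged by MountVolume.MountDevice at a device mount path (/mnt/openstack/pv02, shown in the entries below) and only then exposed to the pod by MountVolume.SetUp. A rough sketch of that SetUp step as a bind mount, using the ovsdbserver-sb-0 UID from the log; the real local-volume plugin adds SELinux, fsGroup, and mount-option handling, and this requires root on Linux:

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Staged device mount path and pod volume dir, taken from the
        // ovsdbserver-sb-0 entries (UID ae74a803-c417-4ab8-8842-20a575b77dd3).
        global := "/mnt/openstack/pv02"
        podVol := "/var/lib/kubelet/pods/ae74a803-c417-4ab8-8842-20a575b77dd3/" +
            "volumes/kubernetes.io~local-volume/local-storage02-crc"

        if err := os.MkdirAll(podVol, 0o750); err != nil {
            panic(err)
        }
        // SetUp for a local volume amounts to bind-mounting the staged
        // path into the pod's volume directory.
        if err := unix.Mount(global, podVol, "", unix.MS_BIND, ""); err != nil {
            panic(err)
        }
        fmt.Println("bind-mounted", global, "->", podVol)
    }
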
\"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.802005 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ae74a803-c417-4ab8-8842-20a575b77dd3-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.904429 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae74a803-c417-4ab8-8842-20a575b77dd3-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.904494 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ae74a803-c417-4ab8-8842-20a575b77dd3-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.904534 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae74a803-c417-4ab8-8842-20a575b77dd3-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.904574 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae74a803-c417-4ab8-8842-20a575b77dd3-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.904635 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae74a803-c417-4ab8-8842-20a575b77dd3-config\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.904666 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.904691 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ae74a803-c417-4ab8-8842-20a575b77dd3-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.904756 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zlp9g\" (UniqueName: \"kubernetes.io/projected/ae74a803-c417-4ab8-8842-20a575b77dd3-kube-api-access-zlp9g\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.904978 4687 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.909340 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ae74a803-c417-4ab8-8842-20a575b77dd3-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.909872 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae74a803-c417-4ab8-8842-20a575b77dd3-config\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.909997 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ae74a803-c417-4ab8-8842-20a575b77dd3-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.912459 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae74a803-c417-4ab8-8842-20a575b77dd3-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.913120 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae74a803-c417-4ab8-8842-20a575b77dd3-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.920429 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae74a803-c417-4ab8-8842-20a575b77dd3-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.923353 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zlp9g\" (UniqueName: \"kubernetes.io/projected/ae74a803-c417-4ab8-8842-20a575b77dd3-kube-api-access-zlp9g\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.934020 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"ae74a803-c417-4ab8-8842-20a575b77dd3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:15 crc kubenswrapper[4687]: I1125 09:21:15.994791 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:18 crc kubenswrapper[4687]: W1125 09:21:18.047646 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c8dccfc_e66b_4a60_b0c4_d7c94652dd12.slice/crio-b8e8afda35f66bfdd2004769a55297e167b9878cf39ea5d7824fb4222e519e92 WatchSource:0}: Error finding container b8e8afda35f66bfdd2004769a55297e167b9878cf39ea5d7824fb4222e519e92: Status 404 returned error can't find the container with id b8e8afda35f66bfdd2004769a55297e167b9878cf39ea5d7824fb4222e519e92 Nov 25 09:21:18 crc kubenswrapper[4687]: W1125 09:21:18.048968 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8b16fcc2_1dd1_47d5_979a_f50611173736.slice/crio-29005ff92603ad984070be65bc675f3f7fc95f47ac2c68a55945e9e2d8ad6aa3 WatchSource:0}: Error finding container 29005ff92603ad984070be65bc675f3f7fc95f47ac2c68a55945e9e2d8ad6aa3: Status 404 returned error can't find the container with id 29005ff92603ad984070be65bc675f3f7fc95f47ac2c68a55945e9e2d8ad6aa3 Nov 25 09:21:18 crc kubenswrapper[4687]: I1125 09:21:18.336652 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1c8dccfc-e66b-4a60-b0c4-d7c94652dd12","Type":"ContainerStarted","Data":"b8e8afda35f66bfdd2004769a55297e167b9878cf39ea5d7824fb4222e519e92"} Nov 25 09:21:18 crc kubenswrapper[4687]: I1125 09:21:18.337759 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"8b16fcc2-1dd1-47d5-979a-f50611173736","Type":"ContainerStarted","Data":"29005ff92603ad984070be65bc675f3f7fc95f47ac2c68a55945e9e2d8ad6aa3"} Nov 25 09:21:18 crc kubenswrapper[4687]: E1125 09:21:18.753390 4687 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 25 09:21:18 crc kubenswrapper[4687]: E1125 09:21:18.753724 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xm8dq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-jfcnf_openstack(3ad63126-08ac-4c9b-9693-ed498260fd9b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:21:18 crc kubenswrapper[4687]: E1125 09:21:18.754985 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-jfcnf" podUID="3ad63126-08ac-4c9b-9693-ed498260fd9b" Nov 25 09:21:18 crc kubenswrapper[4687]: E1125 09:21:18.794578 4687 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 25 09:21:18 crc kubenswrapper[4687]: E1125 09:21:18.794938 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-njxds,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-8kjgf_openstack(d69fe00b-6cf3-4c7b-9cd9-6991a107bd21): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:21:18 crc kubenswrapper[4687]: E1125 09:21:18.796713 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-8kjgf" podUID="d69fe00b-6cf3-4c7b-9cd9-6991a107bd21" Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.271223 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.277949 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.283391 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.346637 4687 generic.go:334] "Generic (PLEG): container finished" podID="b48297c6-fda2-4f05-aee3-510ac3d2d03b" containerID="f29eec8bd25b9ddc056a71a70e611d7bfb0262100e023938c432699bec0f5939" exitCode=0 Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.346739 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-7snmr" event={"ID":"b48297c6-fda2-4f05-aee3-510ac3d2d03b","Type":"ContainerDied","Data":"f29eec8bd25b9ddc056a71a70e611d7bfb0262100e023938c432699bec0f5939"} Nov 25 09:21:19 crc kubenswrapper[4687]: W1125 09:21:19.372222 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb1c0236e_917b_4c65_a9b7_6d3508c1f4a8.slice/crio-4bcb8eb6880126fa288f3d653bdbb0298f55ce7a439b38e83db95bd70ed53cb5 WatchSource:0}: Error finding container 4bcb8eb6880126fa288f3d653bdbb0298f55ce7a439b38e83db95bd70ed53cb5: Status 
404 returned error can't find the container with id 4bcb8eb6880126fa288f3d653bdbb0298f55ce7a439b38e83db95bd70ed53cb5 Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.374956 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" event={"ID":"a4c3e61a-c02f-410d-aed1-76fe207f46c5","Type":"ContainerStarted","Data":"0571b877df18990458d15fc958af49185e7d285e18ec7e015a893399c3c17b6b"} Nov 25 09:21:19 crc kubenswrapper[4687]: W1125 09:21:19.381406 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podabf2ba7b_04ee_4461_9aa5_0d1ba9fa1ec7.slice/crio-ac983ac144d32c67ead6359f9ee51ca9dd33332ea2af263edb41894360eb49c0 WatchSource:0}: Error finding container ac983ac144d32c67ead6359f9ee51ca9dd33332ea2af263edb41894360eb49c0: Status 404 returned error can't find the container with id ac983ac144d32c67ead6359f9ee51ca9dd33332ea2af263edb41894360eb49c0 Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.426727 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.495716 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-fdpzn"] Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.608449 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.768546 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-jfcnf" Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.779546 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.780937 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ad63126-08ac-4c9b-9693-ed498260fd9b-dns-svc\") pod \"3ad63126-08ac-4c9b-9693-ed498260fd9b\" (UID: \"3ad63126-08ac-4c9b-9693-ed498260fd9b\") " Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.781085 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ad63126-08ac-4c9b-9693-ed498260fd9b-config\") pod \"3ad63126-08ac-4c9b-9693-ed498260fd9b\" (UID: \"3ad63126-08ac-4c9b-9693-ed498260fd9b\") " Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.781155 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xm8dq\" (UniqueName: \"kubernetes.io/projected/3ad63126-08ac-4c9b-9693-ed498260fd9b-kube-api-access-xm8dq\") pod \"3ad63126-08ac-4c9b-9693-ed498260fd9b\" (UID: \"3ad63126-08ac-4c9b-9693-ed498260fd9b\") " Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.781741 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ad63126-08ac-4c9b-9693-ed498260fd9b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3ad63126-08ac-4c9b-9693-ed498260fd9b" (UID: "3ad63126-08ac-4c9b-9693-ed498260fd9b"). InnerVolumeSpecName "dns-svc". 
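
[annotation] The ErrImagePull entries above are a CRI PullImage RPC failing with gRPC code Canceled ("copying config: context canceled"): kuberuntime logs the init-container spec as an Unhandled Error, and pod_workers marks "Error syncing pod, skipping" so the pod is retried on a later sync (surfacing as ImagePullBackOff if it persists). That is why the volume teardown for the replaced dnsmasq pods follows immediately. A small sketch of telling a cancelled pull apart from other failures with the google.golang.org/grpc status API:

    package main

    import (
        "errors"
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // classifyPullError distinguishes a gRPC Canceled code from other CRI
    // pull failures; kubelet wraps either as ErrImagePull and retries.
    func classifyPullError(err error) string {
        if s, ok := status.FromError(err); ok && s.Code() == codes.Canceled {
            return "pull cancelled mid-copy; retry on next sync"
        }
        return "other pull failure: " + err.Error()
    }

    func main() {
        fmt.Println(classifyPullError(
            status.Error(codes.Canceled, "copying config: context canceled")))
        fmt.Println(classifyPullError(errors.New("manifest unknown")))
    }
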
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.782006 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ad63126-08ac-4c9b-9693-ed498260fd9b-config" (OuterVolumeSpecName: "config") pod "3ad63126-08ac-4c9b-9693-ed498260fd9b" (UID: "3ad63126-08ac-4c9b-9693-ed498260fd9b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.783380 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-8kjgf" Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.788588 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ad63126-08ac-4c9b-9693-ed498260fd9b-kube-api-access-xm8dq" (OuterVolumeSpecName: "kube-api-access-xm8dq") pod "3ad63126-08ac-4c9b-9693-ed498260fd9b" (UID: "3ad63126-08ac-4c9b-9693-ed498260fd9b"). InnerVolumeSpecName "kube-api-access-xm8dq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.882285 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-njxds\" (UniqueName: \"kubernetes.io/projected/d69fe00b-6cf3-4c7b-9cd9-6991a107bd21-kube-api-access-njxds\") pod \"d69fe00b-6cf3-4c7b-9cd9-6991a107bd21\" (UID: \"d69fe00b-6cf3-4c7b-9cd9-6991a107bd21\") " Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.882372 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d69fe00b-6cf3-4c7b-9cd9-6991a107bd21-config\") pod \"d69fe00b-6cf3-4c7b-9cd9-6991a107bd21\" (UID: \"d69fe00b-6cf3-4c7b-9cd9-6991a107bd21\") " Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.882873 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d69fe00b-6cf3-4c7b-9cd9-6991a107bd21-config" (OuterVolumeSpecName: "config") pod "d69fe00b-6cf3-4c7b-9cd9-6991a107bd21" (UID: "d69fe00b-6cf3-4c7b-9cd9-6991a107bd21"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.883135 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ad63126-08ac-4c9b-9693-ed498260fd9b-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.883152 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xm8dq\" (UniqueName: \"kubernetes.io/projected/3ad63126-08ac-4c9b-9693-ed498260fd9b-kube-api-access-xm8dq\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.883161 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d69fe00b-6cf3-4c7b-9cd9-6991a107bd21-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.883170 4687 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ad63126-08ac-4c9b-9693-ed498260fd9b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.893519 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d69fe00b-6cf3-4c7b-9cd9-6991a107bd21-kube-api-access-njxds" (OuterVolumeSpecName: "kube-api-access-njxds") pod "d69fe00b-6cf3-4c7b-9cd9-6991a107bd21" (UID: "d69fe00b-6cf3-4c7b-9cd9-6991a107bd21"). InnerVolumeSpecName "kube-api-access-njxds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:21:19 crc kubenswrapper[4687]: I1125 09:21:19.984420 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-njxds\" (UniqueName: \"kubernetes.io/projected/d69fe00b-6cf3-4c7b-9cd9-6991a107bd21-kube-api-access-njxds\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:20 crc kubenswrapper[4687]: I1125 09:21:20.387610 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ae74a803-c417-4ab8-8842-20a575b77dd3","Type":"ContainerStarted","Data":"066599515bda26da154513faeb5e5230c43ffcba3688226997628f5e2b3a193d"} Nov 25 09:21:20 crc kubenswrapper[4687]: I1125 09:21:20.388766 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8","Type":"ContainerStarted","Data":"4bcb8eb6880126fa288f3d653bdbb0298f55ce7a439b38e83db95bd70ed53cb5"} Nov 25 09:21:20 crc kubenswrapper[4687]: I1125 09:21:20.389863 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"9d20bc24-507c-4712-8c05-c8d3cfd4e87f","Type":"ContainerStarted","Data":"621405d6e3c86b69bd562f0dad29ce99131dfa81b5210fb80c2697a5cf0ece64"} Nov 25 09:21:20 crc kubenswrapper[4687]: I1125 09:21:20.390895 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee","Type":"ContainerStarted","Data":"5177f22b77c0d87914a05a27143ad7b3889f694436d8cd474f181597bd38c846"} Nov 25 09:21:20 crc kubenswrapper[4687]: I1125 09:21:20.392041 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-8kjgf" event={"ID":"d69fe00b-6cf3-4c7b-9cd9-6991a107bd21","Type":"ContainerDied","Data":"c5bb436e5ab4e16823677f1613da8fbb42b42be37e9cfd08fffe2081a1ededbc"} Nov 25 09:21:20 crc kubenswrapper[4687]: I1125 09:21:20.392047 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-8kjgf" Nov 25 09:21:20 crc kubenswrapper[4687]: I1125 09:21:20.396224 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" event={"ID":"a4c3e61a-c02f-410d-aed1-76fe207f46c5","Type":"ContainerDied","Data":"0571b877df18990458d15fc958af49185e7d285e18ec7e015a893399c3c17b6b"} Nov 25 09:21:20 crc kubenswrapper[4687]: I1125 09:21:20.396686 4687 generic.go:334] "Generic (PLEG): container finished" podID="a4c3e61a-c02f-410d-aed1-76fe207f46c5" containerID="0571b877df18990458d15fc958af49185e7d285e18ec7e015a893399c3c17b6b" exitCode=0 Nov 25 09:21:20 crc kubenswrapper[4687]: I1125 09:21:20.399568 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-jfcnf" event={"ID":"3ad63126-08ac-4c9b-9693-ed498260fd9b","Type":"ContainerDied","Data":"5dd45ad10548cc00e3bdb58539e5d3f42dddc2877c2d0706677645316631c7e3"} Nov 25 09:21:20 crc kubenswrapper[4687]: I1125 09:21:20.399661 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-jfcnf" Nov 25 09:21:20 crc kubenswrapper[4687]: I1125 09:21:20.406884 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7","Type":"ContainerStarted","Data":"1a3ef451201ec0548824312a3e406c1cba96ee06c04d62ee4a890ab8a65a3676"} Nov 25 09:21:20 crc kubenswrapper[4687]: I1125 09:21:20.407998 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fdpzn" event={"ID":"6894bad0-9f1e-4d44-89a3-b06c6b24495a","Type":"ContainerStarted","Data":"309936d1332ad061cc8a5c52801af3ef033ac1d506d38d30a72402c01028124c"} Nov 25 09:21:20 crc kubenswrapper[4687]: I1125 09:21:20.409190 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7","Type":"ContainerStarted","Data":"ac983ac144d32c67ead6359f9ee51ca9dd33332ea2af263edb41894360eb49c0"} Nov 25 09:21:20 crc kubenswrapper[4687]: I1125 09:21:20.411576 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-7snmr" event={"ID":"b48297c6-fda2-4f05-aee3-510ac3d2d03b","Type":"ContainerStarted","Data":"a0e8443a9e4e0db451bb1d5078d6afb8b3a5a8dfbed59c32524b3d5e414e7566"} Nov 25 09:21:20 crc kubenswrapper[4687]: I1125 09:21:20.411786 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-7snmr" Nov 25 09:21:20 crc kubenswrapper[4687]: I1125 09:21:20.443573 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-7snmr" podStartSLOduration=3.247295618 podStartE2EDuration="19.443538237s" podCreationTimestamp="2025-11-25 09:21:01 +0000 UTC" firstStartedPulling="2025-11-25 09:21:02.685465557 +0000 UTC m=+1057.739105275" lastFinishedPulling="2025-11-25 09:21:18.881708176 +0000 UTC m=+1073.935347894" observedRunningTime="2025-11-25 09:21:20.439131656 +0000 UTC m=+1075.492771384" watchObservedRunningTime="2025-11-25 09:21:20.443538237 +0000 UTC m=+1075.497177955" Nov 25 09:21:20 crc kubenswrapper[4687]: I1125 09:21:20.469144 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-hstvx"] Nov 25 09:21:20 crc kubenswrapper[4687]: W1125 09:21:20.472845 4687 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podec3fcef6_578a_4687_9af6_18d6de32f1e1.slice/crio-1d3d4edb2e06d799720c886b65839794949ea8ed5ab69cd03d087d951f449d05 WatchSource:0}: Error finding container 1d3d4edb2e06d799720c886b65839794949ea8ed5ab69cd03d087d951f449d05: Status 404 returned error can't find the container with id 1d3d4edb2e06d799720c886b65839794949ea8ed5ab69cd03d087d951f449d05 Nov 25 09:21:20 crc kubenswrapper[4687]: I1125 09:21:20.579004 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-8kjgf"] Nov 25 09:21:20 crc kubenswrapper[4687]: I1125 09:21:20.593097 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-8kjgf"] Nov 25 09:21:20 crc kubenswrapper[4687]: I1125 09:21:20.607862 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-jfcnf"] Nov 25 09:21:20 crc kubenswrapper[4687]: I1125 09:21:20.613322 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-jfcnf"] Nov 25 09:21:21 crc kubenswrapper[4687]: I1125 09:21:21.420700 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" event={"ID":"a4c3e61a-c02f-410d-aed1-76fe207f46c5","Type":"ContainerStarted","Data":"72e101fbea4a41685c54343fc39a3ac98140c32b47c5cb593e9350445bf15f9b"} Nov 25 09:21:21 crc kubenswrapper[4687]: I1125 09:21:21.420831 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" Nov 25 09:21:21 crc kubenswrapper[4687]: I1125 09:21:21.422261 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hstvx" event={"ID":"ec3fcef6-578a-4687-9af6-18d6de32f1e1","Type":"ContainerStarted","Data":"1d3d4edb2e06d799720c886b65839794949ea8ed5ab69cd03d087d951f449d05"} Nov 25 09:21:21 crc kubenswrapper[4687]: I1125 09:21:21.444723 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" podStartSLOduration=10.179804292 podStartE2EDuration="19.444705791s" podCreationTimestamp="2025-11-25 09:21:02 +0000 UTC" firstStartedPulling="2025-11-25 09:21:09.620688273 +0000 UTC m=+1064.674328001" lastFinishedPulling="2025-11-25 09:21:18.885589772 +0000 UTC m=+1073.939229500" observedRunningTime="2025-11-25 09:21:21.436414144 +0000 UTC m=+1076.490053862" watchObservedRunningTime="2025-11-25 09:21:21.444705791 +0000 UTC m=+1076.498345519" Nov 25 09:21:21 crc kubenswrapper[4687]: I1125 09:21:21.744577 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ad63126-08ac-4c9b-9693-ed498260fd9b" path="/var/lib/kubelet/pods/3ad63126-08ac-4c9b-9693-ed498260fd9b/volumes" Nov 25 09:21:21 crc kubenswrapper[4687]: I1125 09:21:21.745155 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d69fe00b-6cf3-4c7b-9cd9-6991a107bd21" path="/var/lib/kubelet/pods/d69fe00b-6cf3-4c7b-9cd9-6991a107bd21/volumes" Nov 25 09:21:27 crc kubenswrapper[4687]: I1125 09:21:27.284106 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-666b6646f7-7snmr" Nov 25 09:21:27 crc kubenswrapper[4687]: I1125 09:21:27.593570 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" Nov 25 09:21:27 crc kubenswrapper[4687]: I1125 09:21:27.665068 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-7snmr"] Nov 25 09:21:27 crc 
kubenswrapper[4687]: I1125 09:21:27.665476 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-666b6646f7-7snmr" podUID="b48297c6-fda2-4f05-aee3-510ac3d2d03b" containerName="dnsmasq-dns" containerID="cri-o://a0e8443a9e4e0db451bb1d5078d6afb8b3a5a8dfbed59c32524b3d5e414e7566" gracePeriod=10 Nov 25 09:21:28 crc kubenswrapper[4687]: I1125 09:21:28.479826 4687 generic.go:334] "Generic (PLEG): container finished" podID="b48297c6-fda2-4f05-aee3-510ac3d2d03b" containerID="a0e8443a9e4e0db451bb1d5078d6afb8b3a5a8dfbed59c32524b3d5e414e7566" exitCode=0 Nov 25 09:21:28 crc kubenswrapper[4687]: I1125 09:21:28.479909 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-7snmr" event={"ID":"b48297c6-fda2-4f05-aee3-510ac3d2d03b","Type":"ContainerDied","Data":"a0e8443a9e4e0db451bb1d5078d6afb8b3a5a8dfbed59c32524b3d5e414e7566"} Nov 25 09:21:29 crc kubenswrapper[4687]: I1125 09:21:29.666600 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-7snmr" Nov 25 09:21:29 crc kubenswrapper[4687]: I1125 09:21:29.761726 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hvkcj\" (UniqueName: \"kubernetes.io/projected/b48297c6-fda2-4f05-aee3-510ac3d2d03b-kube-api-access-hvkcj\") pod \"b48297c6-fda2-4f05-aee3-510ac3d2d03b\" (UID: \"b48297c6-fda2-4f05-aee3-510ac3d2d03b\") " Nov 25 09:21:29 crc kubenswrapper[4687]: I1125 09:21:29.761970 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b48297c6-fda2-4f05-aee3-510ac3d2d03b-config\") pod \"b48297c6-fda2-4f05-aee3-510ac3d2d03b\" (UID: \"b48297c6-fda2-4f05-aee3-510ac3d2d03b\") " Nov 25 09:21:29 crc kubenswrapper[4687]: I1125 09:21:29.762032 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b48297c6-fda2-4f05-aee3-510ac3d2d03b-dns-svc\") pod \"b48297c6-fda2-4f05-aee3-510ac3d2d03b\" (UID: \"b48297c6-fda2-4f05-aee3-510ac3d2d03b\") " Nov 25 09:21:29 crc kubenswrapper[4687]: I1125 09:21:29.766723 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b48297c6-fda2-4f05-aee3-510ac3d2d03b-kube-api-access-hvkcj" (OuterVolumeSpecName: "kube-api-access-hvkcj") pod "b48297c6-fda2-4f05-aee3-510ac3d2d03b" (UID: "b48297c6-fda2-4f05-aee3-510ac3d2d03b"). InnerVolumeSpecName "kube-api-access-hvkcj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:21:29 crc kubenswrapper[4687]: I1125 09:21:29.797178 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b48297c6-fda2-4f05-aee3-510ac3d2d03b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b48297c6-fda2-4f05-aee3-510ac3d2d03b" (UID: "b48297c6-fda2-4f05-aee3-510ac3d2d03b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:29 crc kubenswrapper[4687]: I1125 09:21:29.814958 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b48297c6-fda2-4f05-aee3-510ac3d2d03b-config" (OuterVolumeSpecName: "config") pod "b48297c6-fda2-4f05-aee3-510ac3d2d03b" (UID: "b48297c6-fda2-4f05-aee3-510ac3d2d03b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:29 crc kubenswrapper[4687]: I1125 09:21:29.863919 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hvkcj\" (UniqueName: \"kubernetes.io/projected/b48297c6-fda2-4f05-aee3-510ac3d2d03b-kube-api-access-hvkcj\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:29 crc kubenswrapper[4687]: I1125 09:21:29.863964 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b48297c6-fda2-4f05-aee3-510ac3d2d03b-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:29 crc kubenswrapper[4687]: I1125 09:21:29.863994 4687 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b48297c6-fda2-4f05-aee3-510ac3d2d03b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:30 crc kubenswrapper[4687]: I1125 09:21:30.498199 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-7snmr" event={"ID":"b48297c6-fda2-4f05-aee3-510ac3d2d03b","Type":"ContainerDied","Data":"7c6f51274254f05aae5a8ec1542c8fb6d838d7f576dab44d8c33ca6ab9122e06"} Nov 25 09:21:30 crc kubenswrapper[4687]: I1125 09:21:30.498286 4687 scope.go:117] "RemoveContainer" containerID="a0e8443a9e4e0db451bb1d5078d6afb8b3a5a8dfbed59c32524b3d5e414e7566" Nov 25 09:21:30 crc kubenswrapper[4687]: I1125 09:21:30.498328 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-7snmr" Nov 25 09:21:30 crc kubenswrapper[4687]: I1125 09:21:30.536221 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-7snmr"] Nov 25 09:21:30 crc kubenswrapper[4687]: I1125 09:21:30.545047 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-7snmr"] Nov 25 09:21:31 crc kubenswrapper[4687]: I1125 09:21:31.116829 4687 scope.go:117] "RemoveContainer" containerID="f29eec8bd25b9ddc056a71a70e611d7bfb0262100e023938c432699bec0f5939" Nov 25 09:21:31 crc kubenswrapper[4687]: I1125 09:21:31.506209 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"8b16fcc2-1dd1-47d5-979a-f50611173736","Type":"ContainerStarted","Data":"d8e1f770987b919b3522a2c588b75b03cdb8de439ad93671cba14dc5375aa202"} Nov 25 09:21:31 crc kubenswrapper[4687]: I1125 09:21:31.519827 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8","Type":"ContainerStarted","Data":"c46dbee2c2c08b9d3ec88186ec5c8935c864a891fd5bcd8c02db299450490012"} Nov 25 09:21:31 crc kubenswrapper[4687]: I1125 09:21:31.744219 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b48297c6-fda2-4f05-aee3-510ac3d2d03b" path="/var/lib/kubelet/pods/b48297c6-fda2-4f05-aee3-510ac3d2d03b/volumes" Nov 25 09:21:32 crc kubenswrapper[4687]: I1125 09:21:32.540205 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1c8dccfc-e66b-4a60-b0c4-d7c94652dd12","Type":"ContainerStarted","Data":"063ccf89159c0cd4d2cf70e1bd2fd09923dd1d73727983993e2764b5914f3543"} Nov 25 09:21:32 crc kubenswrapper[4687]: I1125 09:21:32.541852 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 09:21:32 crc kubenswrapper[4687]: I1125 09:21:32.548050 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" 
event={"ID":"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7","Type":"ContainerStarted","Data":"c595258a961b41004a924e3c4310ee9e02391fc1013460452a08f5fb21a05e95"} Nov 25 09:21:32 crc kubenswrapper[4687]: I1125 09:21:32.551437 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"9d20bc24-507c-4712-8c05-c8d3cfd4e87f","Type":"ContainerStarted","Data":"06bb23cef43c20ac850a209d18da17e447e9cf935af5ab9a9828dad5648df9bf"} Nov 25 09:21:32 crc kubenswrapper[4687]: I1125 09:21:32.552784 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee","Type":"ContainerStarted","Data":"9916f15a8da56f333d7b7d285a6679cc365924b75d455e2c7dae8ee65095fcab"} Nov 25 09:21:32 crc kubenswrapper[4687]: I1125 09:21:32.558956 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ae74a803-c417-4ab8-8842-20a575b77dd3","Type":"ContainerStarted","Data":"9db22ea5c3e9bfb6f46efd51164b6b2e50e273dab39300de2f7684d361c09c40"} Nov 25 09:21:32 crc kubenswrapper[4687]: I1125 09:21:32.561360 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=11.349172982 podStartE2EDuration="24.56134425s" podCreationTimestamp="2025-11-25 09:21:08 +0000 UTC" firstStartedPulling="2025-11-25 09:21:18.049767 +0000 UTC m=+1073.103406718" lastFinishedPulling="2025-11-25 09:21:31.261938268 +0000 UTC m=+1086.315577986" observedRunningTime="2025-11-25 09:21:32.559158651 +0000 UTC m=+1087.612798379" watchObservedRunningTime="2025-11-25 09:21:32.56134425 +0000 UTC m=+1087.614983978" Nov 25 09:21:32 crc kubenswrapper[4687]: I1125 09:21:32.561410 4687 generic.go:334] "Generic (PLEG): container finished" podID="ec3fcef6-578a-4687-9af6-18d6de32f1e1" containerID="dc68218e234723368a01bef5d38112d2ad69019299fdc59c2fa098150e6ff1b4" exitCode=0 Nov 25 09:21:32 crc kubenswrapper[4687]: I1125 09:21:32.561492 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hstvx" event={"ID":"ec3fcef6-578a-4687-9af6-18d6de32f1e1","Type":"ContainerDied","Data":"dc68218e234723368a01bef5d38112d2ad69019299fdc59c2fa098150e6ff1b4"} Nov 25 09:21:32 crc kubenswrapper[4687]: I1125 09:21:32.567220 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7","Type":"ContainerStarted","Data":"3eaae14b32dd1441f0f4b5afa9dbed198e2d2a014e60d97302deff0fdd7d2276"} Nov 25 09:21:32 crc kubenswrapper[4687]: I1125 09:21:32.571244 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fdpzn" event={"ID":"6894bad0-9f1e-4d44-89a3-b06c6b24495a","Type":"ContainerStarted","Data":"7efe45867b8bd8ba7a1f0b9a7842883611aa52ad4ebbf0ca22ef8a9c6425f987"} Nov 25 09:21:32 crc kubenswrapper[4687]: I1125 09:21:32.571513 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 25 09:21:32 crc kubenswrapper[4687]: I1125 09:21:32.658278 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-fdpzn" Nov 25 09:21:32 crc kubenswrapper[4687]: I1125 09:21:32.659790 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=18.775463407 podStartE2EDuration="26.659776203s" podCreationTimestamp="2025-11-25 09:21:06 +0000 UTC" firstStartedPulling="2025-11-25 09:21:18.054065338 +0000 UTC m=+1073.107705056" 
lastFinishedPulling="2025-11-25 09:21:25.938378134 +0000 UTC m=+1080.992017852" observedRunningTime="2025-11-25 09:21:32.649182993 +0000 UTC m=+1087.702822731" watchObservedRunningTime="2025-11-25 09:21:32.659776203 +0000 UTC m=+1087.713415921" Nov 25 09:21:32 crc kubenswrapper[4687]: I1125 09:21:32.713481 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-fdpzn" podStartSLOduration=9.52149444 podStartE2EDuration="20.713457221s" podCreationTimestamp="2025-11-25 09:21:12 +0000 UTC" firstStartedPulling="2025-11-25 09:21:19.608710661 +0000 UTC m=+1074.662350379" lastFinishedPulling="2025-11-25 09:21:30.800673442 +0000 UTC m=+1085.854313160" observedRunningTime="2025-11-25 09:21:32.707326244 +0000 UTC m=+1087.760965962" watchObservedRunningTime="2025-11-25 09:21:32.713457221 +0000 UTC m=+1087.767096939" Nov 25 09:21:33 crc kubenswrapper[4687]: I1125 09:21:33.580672 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hstvx" event={"ID":"ec3fcef6-578a-4687-9af6-18d6de32f1e1","Type":"ContainerStarted","Data":"07ac83bf57807724a6d03eeac0044b8f00361afbd91245da7ce6a5ccadd18108"} Nov 25 09:21:33 crc kubenswrapper[4687]: I1125 09:21:33.581141 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hstvx" event={"ID":"ec3fcef6-578a-4687-9af6-18d6de32f1e1","Type":"ContainerStarted","Data":"5f9ed93802a545ccb7ed2e2c228f2bdabf1df4bdb351b7ed150356a7ef62ac7b"} Nov 25 09:21:33 crc kubenswrapper[4687]: I1125 09:21:33.582388 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:33 crc kubenswrapper[4687]: I1125 09:21:33.582778 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:21:33 crc kubenswrapper[4687]: I1125 09:21:33.608384 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-hstvx" podStartSLOduration=11.818253133 podStartE2EDuration="21.608364429s" podCreationTimestamp="2025-11-25 09:21:12 +0000 UTC" firstStartedPulling="2025-11-25 09:21:20.476039595 +0000 UTC m=+1075.529679313" lastFinishedPulling="2025-11-25 09:21:30.266150891 +0000 UTC m=+1085.319790609" observedRunningTime="2025-11-25 09:21:33.599137117 +0000 UTC m=+1088.652776835" watchObservedRunningTime="2025-11-25 09:21:33.608364429 +0000 UTC m=+1088.662004157" Nov 25 09:21:35 crc kubenswrapper[4687]: I1125 09:21:35.597964 4687 generic.go:334] "Generic (PLEG): container finished" podID="b1c0236e-917b-4c65-a9b7-6d3508c1f4a8" containerID="c46dbee2c2c08b9d3ec88186ec5c8935c864a891fd5bcd8c02db299450490012" exitCode=0 Nov 25 09:21:35 crc kubenswrapper[4687]: I1125 09:21:35.598041 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8","Type":"ContainerDied","Data":"c46dbee2c2c08b9d3ec88186ec5c8935c864a891fd5bcd8c02db299450490012"} Nov 25 09:21:35 crc kubenswrapper[4687]: I1125 09:21:35.602022 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"9d20bc24-507c-4712-8c05-c8d3cfd4e87f","Type":"ContainerStarted","Data":"04778b8dd86ec19ec2eb94cd405df64f0ab9950dddfdd194b27ae5bad1e346fe"} Nov 25 09:21:35 crc kubenswrapper[4687]: I1125 09:21:35.603941 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" 
event={"ID":"ae74a803-c417-4ab8-8842-20a575b77dd3","Type":"ContainerStarted","Data":"2b0f035a6d1c6ffc54b3f9e328c7508f0829738929e25d3082a12b2774eaf032"} Nov 25 09:21:35 crc kubenswrapper[4687]: I1125 09:21:35.605632 4687 generic.go:334] "Generic (PLEG): container finished" podID="f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7" containerID="3eaae14b32dd1441f0f4b5afa9dbed198e2d2a014e60d97302deff0fdd7d2276" exitCode=0 Nov 25 09:21:35 crc kubenswrapper[4687]: I1125 09:21:35.605684 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7","Type":"ContainerDied","Data":"3eaae14b32dd1441f0f4b5afa9dbed198e2d2a014e60d97302deff0fdd7d2276"} Nov 25 09:21:35 crc kubenswrapper[4687]: I1125 09:21:35.659922 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=6.394858074 podStartE2EDuration="21.659902083s" podCreationTimestamp="2025-11-25 09:21:14 +0000 UTC" firstStartedPulling="2025-11-25 09:21:19.650915546 +0000 UTC m=+1074.704555264" lastFinishedPulling="2025-11-25 09:21:34.915959535 +0000 UTC m=+1089.969599273" observedRunningTime="2025-11-25 09:21:35.657335124 +0000 UTC m=+1090.710974852" watchObservedRunningTime="2025-11-25 09:21:35.659902083 +0000 UTC m=+1090.713541811" Nov 25 09:21:35 crc kubenswrapper[4687]: I1125 09:21:35.701980 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=9.522460655 podStartE2EDuration="24.701955034s" podCreationTimestamp="2025-11-25 09:21:11 +0000 UTC" firstStartedPulling="2025-11-25 09:21:19.760174555 +0000 UTC m=+1074.813814273" lastFinishedPulling="2025-11-25 09:21:34.939668924 +0000 UTC m=+1089.993308652" observedRunningTime="2025-11-25 09:21:35.678904424 +0000 UTC m=+1090.732544142" watchObservedRunningTime="2025-11-25 09:21:35.701955034 +0000 UTC m=+1090.755594752" Nov 25 09:21:35 crc kubenswrapper[4687]: I1125 09:21:35.995906 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:36 crc kubenswrapper[4687]: I1125 09:21:36.620208 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"b1c0236e-917b-4c65-a9b7-6d3508c1f4a8","Type":"ContainerStarted","Data":"c27bf5cb32d50ad1cc268bfc5ef470f9eb80b3b64122a45316c0110b4cd57f11"} Nov 25 09:21:36 crc kubenswrapper[4687]: I1125 09:21:36.624135 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7","Type":"ContainerStarted","Data":"01fc3307c32b3bf67723056c052f53c156dd63f35df64d9469cbdaa0d7ffeeb1"} Nov 25 09:21:36 crc kubenswrapper[4687]: I1125 09:21:36.650683 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=21.43142851 podStartE2EDuration="31.650654574s" podCreationTimestamp="2025-11-25 09:21:05 +0000 UTC" firstStartedPulling="2025-11-25 09:21:19.384688413 +0000 UTC m=+1074.438328131" lastFinishedPulling="2025-11-25 09:21:29.603914457 +0000 UTC m=+1084.657554195" observedRunningTime="2025-11-25 09:21:36.645464071 +0000 UTC m=+1091.699103809" watchObservedRunningTime="2025-11-25 09:21:36.650654574 +0000 UTC m=+1091.704294312" Nov 25 09:21:36 crc kubenswrapper[4687]: I1125 09:21:36.674448 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:36 crc 
kubenswrapper[4687]: I1125 09:21:36.674513 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:36 crc kubenswrapper[4687]: I1125 09:21:36.928921 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:36 crc kubenswrapper[4687]: I1125 09:21:36.988393 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 25 09:21:36 crc kubenswrapper[4687]: I1125 09:21:36.993132 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:36 crc kubenswrapper[4687]: I1125 09:21:36.996561 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:37 crc kubenswrapper[4687]: I1125 09:21:37.014593 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=24.679488188 podStartE2EDuration="34.014574028s" podCreationTimestamp="2025-11-25 09:21:03 +0000 UTC" firstStartedPulling="2025-11-25 09:21:19.371996666 +0000 UTC m=+1074.425636384" lastFinishedPulling="2025-11-25 09:21:28.707082506 +0000 UTC m=+1083.760722224" observedRunningTime="2025-11-25 09:21:36.674482675 +0000 UTC m=+1091.728122393" watchObservedRunningTime="2025-11-25 09:21:37.014574028 +0000 UTC m=+1092.068213746" Nov 25 09:21:37 crc kubenswrapper[4687]: I1125 09:21:37.100322 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:37 crc kubenswrapper[4687]: I1125 09:21:37.629648 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:37 crc kubenswrapper[4687]: I1125 09:21:37.669085 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 25 09:21:37 crc kubenswrapper[4687]: I1125 09:21:37.687086 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 25 09:21:37 crc kubenswrapper[4687]: I1125 09:21:37.946984 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-wch8m"] Nov 25 09:21:37 crc kubenswrapper[4687]: E1125 09:21:37.947321 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b48297c6-fda2-4f05-aee3-510ac3d2d03b" containerName="dnsmasq-dns" Nov 25 09:21:37 crc kubenswrapper[4687]: I1125 09:21:37.947338 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="b48297c6-fda2-4f05-aee3-510ac3d2d03b" containerName="dnsmasq-dns" Nov 25 09:21:37 crc kubenswrapper[4687]: E1125 09:21:37.947356 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b48297c6-fda2-4f05-aee3-510ac3d2d03b" containerName="init" Nov 25 09:21:37 crc kubenswrapper[4687]: I1125 09:21:37.947364 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="b48297c6-fda2-4f05-aee3-510ac3d2d03b" containerName="init" Nov 25 09:21:37 crc kubenswrapper[4687]: I1125 09:21:37.947580 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="b48297c6-fda2-4f05-aee3-510ac3d2d03b" containerName="dnsmasq-dns" Nov 25 09:21:37 crc kubenswrapper[4687]: I1125 09:21:37.948405 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-wch8m" Nov 25 09:21:37 crc kubenswrapper[4687]: I1125 09:21:37.950724 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 25 09:21:37 crc kubenswrapper[4687]: I1125 09:21:37.984591 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-wch8m"] Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.048273 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-g5gzm"] Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.049286 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-g5gzm" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.059151 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.063200 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-g5gzm"] Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.127465 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e77f6887-5c89-4fef-a0c6-8a6ead617082-config\") pod \"dnsmasq-dns-5bf47b49b7-wch8m\" (UID: \"e77f6887-5c89-4fef-a0c6-8a6ead617082\") " pod="openstack/dnsmasq-dns-5bf47b49b7-wch8m" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.127546 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e77f6887-5c89-4fef-a0c6-8a6ead617082-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-wch8m\" (UID: \"e77f6887-5c89-4fef-a0c6-8a6ead617082\") " pod="openstack/dnsmasq-dns-5bf47b49b7-wch8m" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.127602 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e77f6887-5c89-4fef-a0c6-8a6ead617082-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-wch8m\" (UID: \"e77f6887-5c89-4fef-a0c6-8a6ead617082\") " pod="openstack/dnsmasq-dns-5bf47b49b7-wch8m" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.127648 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7z2pg\" (UniqueName: \"kubernetes.io/projected/e77f6887-5c89-4fef-a0c6-8a6ead617082-kube-api-access-7z2pg\") pod \"dnsmasq-dns-5bf47b49b7-wch8m\" (UID: \"e77f6887-5c89-4fef-a0c6-8a6ead617082\") " pod="openstack/dnsmasq-dns-5bf47b49b7-wch8m" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.141844 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-wch8m"] Nov 25 09:21:38 crc kubenswrapper[4687]: E1125 09:21:38.142398 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config dns-svc kube-api-access-7z2pg ovsdbserver-nb], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-5bf47b49b7-wch8m" podUID="e77f6887-5c89-4fef-a0c6-8a6ead617082" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.172568 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-sg87d"] Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.173905 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-sg87d" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.175648 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-sg87d"] Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.179723 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.200723 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.202290 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.207564 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.207852 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.208031 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-6q4hn" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.208050 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.226130 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.232231 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9a1cd27-5b10-422d-9629-a5a6c0bc128a-combined-ca-bundle\") pod \"ovn-controller-metrics-g5gzm\" (UID: \"b9a1cd27-5b10-422d-9629-a5a6c0bc128a\") " pod="openstack/ovn-controller-metrics-g5gzm" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.232291 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9a1cd27-5b10-422d-9629-a5a6c0bc128a-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-g5gzm\" (UID: \"b9a1cd27-5b10-422d-9629-a5a6c0bc128a\") " pod="openstack/ovn-controller-metrics-g5gzm" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.232318 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e77f6887-5c89-4fef-a0c6-8a6ead617082-config\") pod \"dnsmasq-dns-5bf47b49b7-wch8m\" (UID: \"e77f6887-5c89-4fef-a0c6-8a6ead617082\") " pod="openstack/dnsmasq-dns-5bf47b49b7-wch8m" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.232351 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e77f6887-5c89-4fef-a0c6-8a6ead617082-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-wch8m\" (UID: \"e77f6887-5c89-4fef-a0c6-8a6ead617082\") " pod="openstack/dnsmasq-dns-5bf47b49b7-wch8m" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.232378 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/b9a1cd27-5b10-422d-9629-a5a6c0bc128a-ovs-rundir\") pod \"ovn-controller-metrics-g5gzm\" (UID: \"b9a1cd27-5b10-422d-9629-a5a6c0bc128a\") " pod="openstack/ovn-controller-metrics-g5gzm" Nov 25 09:21:38 crc 
kubenswrapper[4687]: I1125 09:21:38.232398 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9w9j\" (UniqueName: \"kubernetes.io/projected/b9a1cd27-5b10-422d-9629-a5a6c0bc128a-kube-api-access-b9w9j\") pod \"ovn-controller-metrics-g5gzm\" (UID: \"b9a1cd27-5b10-422d-9629-a5a6c0bc128a\") " pod="openstack/ovn-controller-metrics-g5gzm" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.232416 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e77f6887-5c89-4fef-a0c6-8a6ead617082-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-wch8m\" (UID: \"e77f6887-5c89-4fef-a0c6-8a6ead617082\") " pod="openstack/dnsmasq-dns-5bf47b49b7-wch8m" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.232447 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7z2pg\" (UniqueName: \"kubernetes.io/projected/e77f6887-5c89-4fef-a0c6-8a6ead617082-kube-api-access-7z2pg\") pod \"dnsmasq-dns-5bf47b49b7-wch8m\" (UID: \"e77f6887-5c89-4fef-a0c6-8a6ead617082\") " pod="openstack/dnsmasq-dns-5bf47b49b7-wch8m" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.232469 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9a1cd27-5b10-422d-9629-a5a6c0bc128a-config\") pod \"ovn-controller-metrics-g5gzm\" (UID: \"b9a1cd27-5b10-422d-9629-a5a6c0bc128a\") " pod="openstack/ovn-controller-metrics-g5gzm" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.232488 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/b9a1cd27-5b10-422d-9629-a5a6c0bc128a-ovn-rundir\") pod \"ovn-controller-metrics-g5gzm\" (UID: \"b9a1cd27-5b10-422d-9629-a5a6c0bc128a\") " pod="openstack/ovn-controller-metrics-g5gzm" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.233404 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e77f6887-5c89-4fef-a0c6-8a6ead617082-config\") pod \"dnsmasq-dns-5bf47b49b7-wch8m\" (UID: \"e77f6887-5c89-4fef-a0c6-8a6ead617082\") " pod="openstack/dnsmasq-dns-5bf47b49b7-wch8m" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.233760 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e77f6887-5c89-4fef-a0c6-8a6ead617082-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-wch8m\" (UID: \"e77f6887-5c89-4fef-a0c6-8a6ead617082\") " pod="openstack/dnsmasq-dns-5bf47b49b7-wch8m" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.233960 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e77f6887-5c89-4fef-a0c6-8a6ead617082-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-wch8m\" (UID: \"e77f6887-5c89-4fef-a0c6-8a6ead617082\") " pod="openstack/dnsmasq-dns-5bf47b49b7-wch8m" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.281448 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7z2pg\" (UniqueName: \"kubernetes.io/projected/e77f6887-5c89-4fef-a0c6-8a6ead617082-kube-api-access-7z2pg\") pod \"dnsmasq-dns-5bf47b49b7-wch8m\" (UID: \"e77f6887-5c89-4fef-a0c6-8a6ead617082\") " pod="openstack/dnsmasq-dns-5bf47b49b7-wch8m" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 
09:21:38.333834 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4e2efef0-3880-4d7a-bd93-59b596e470b8-scripts\") pod \"ovn-northd-0\" (UID: \"4e2efef0-3880-4d7a-bd93-59b596e470b8\") " pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.333886 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-config\") pod \"dnsmasq-dns-8554648995-sg87d\" (UID: \"2969d39b-717f-4198-a9d9-e0a0723f20f8\") " pod="openstack/dnsmasq-dns-8554648995-sg87d" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.333903 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e2efef0-3880-4d7a-bd93-59b596e470b8-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"4e2efef0-3880-4d7a-bd93-59b596e470b8\") " pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.333926 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9a1cd27-5b10-422d-9629-a5a6c0bc128a-combined-ca-bundle\") pod \"ovn-controller-metrics-g5gzm\" (UID: \"b9a1cd27-5b10-422d-9629-a5a6c0bc128a\") " pod="openstack/ovn-controller-metrics-g5gzm" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.333950 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e2efef0-3880-4d7a-bd93-59b596e470b8-config\") pod \"ovn-northd-0\" (UID: \"4e2efef0-3880-4d7a-bd93-59b596e470b8\") " pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.333969 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrjns\" (UniqueName: \"kubernetes.io/projected/4e2efef0-3880-4d7a-bd93-59b596e470b8-kube-api-access-mrjns\") pod \"ovn-northd-0\" (UID: \"4e2efef0-3880-4d7a-bd93-59b596e470b8\") " pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.334136 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9a1cd27-5b10-422d-9629-a5a6c0bc128a-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-g5gzm\" (UID: \"b9a1cd27-5b10-422d-9629-a5a6c0bc128a\") " pod="openstack/ovn-controller-metrics-g5gzm" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.334218 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-sg87d\" (UID: \"2969d39b-717f-4198-a9d9-e0a0723f20f8\") " pod="openstack/dnsmasq-dns-8554648995-sg87d" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.334259 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4e2efef0-3880-4d7a-bd93-59b596e470b8-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"4e2efef0-3880-4d7a-bd93-59b596e470b8\") " pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.334322 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/b9a1cd27-5b10-422d-9629-a5a6c0bc128a-ovs-rundir\") pod \"ovn-controller-metrics-g5gzm\" (UID: \"b9a1cd27-5b10-422d-9629-a5a6c0bc128a\") " pod="openstack/ovn-controller-metrics-g5gzm" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.334352 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9w9j\" (UniqueName: \"kubernetes.io/projected/b9a1cd27-5b10-422d-9629-a5a6c0bc128a-kube-api-access-b9w9j\") pod \"ovn-controller-metrics-g5gzm\" (UID: \"b9a1cd27-5b10-422d-9629-a5a6c0bc128a\") " pod="openstack/ovn-controller-metrics-g5gzm" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.334401 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e2efef0-3880-4d7a-bd93-59b596e470b8-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"4e2efef0-3880-4d7a-bd93-59b596e470b8\") " pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.334423 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gw4qh\" (UniqueName: \"kubernetes.io/projected/2969d39b-717f-4198-a9d9-e0a0723f20f8-kube-api-access-gw4qh\") pod \"dnsmasq-dns-8554648995-sg87d\" (UID: \"2969d39b-717f-4198-a9d9-e0a0723f20f8\") " pod="openstack/dnsmasq-dns-8554648995-sg87d" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.334474 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9a1cd27-5b10-422d-9629-a5a6c0bc128a-config\") pod \"ovn-controller-metrics-g5gzm\" (UID: \"b9a1cd27-5b10-422d-9629-a5a6c0bc128a\") " pod="openstack/ovn-controller-metrics-g5gzm" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.334498 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/b9a1cd27-5b10-422d-9629-a5a6c0bc128a-ovn-rundir\") pod \"ovn-controller-metrics-g5gzm\" (UID: \"b9a1cd27-5b10-422d-9629-a5a6c0bc128a\") " pod="openstack/ovn-controller-metrics-g5gzm" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.334567 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-sg87d\" (UID: \"2969d39b-717f-4198-a9d9-e0a0723f20f8\") " pod="openstack/dnsmasq-dns-8554648995-sg87d" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.334604 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-dns-svc\") pod \"dnsmasq-dns-8554648995-sg87d\" (UID: \"2969d39b-717f-4198-a9d9-e0a0723f20f8\") " pod="openstack/dnsmasq-dns-8554648995-sg87d" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.334626 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e2efef0-3880-4d7a-bd93-59b596e470b8-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"4e2efef0-3880-4d7a-bd93-59b596e470b8\") " pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.335128 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" 
(UniqueName: \"kubernetes.io/host-path/b9a1cd27-5b10-422d-9629-a5a6c0bc128a-ovs-rundir\") pod \"ovn-controller-metrics-g5gzm\" (UID: \"b9a1cd27-5b10-422d-9629-a5a6c0bc128a\") " pod="openstack/ovn-controller-metrics-g5gzm" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.335197 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/b9a1cd27-5b10-422d-9629-a5a6c0bc128a-ovn-rundir\") pod \"ovn-controller-metrics-g5gzm\" (UID: \"b9a1cd27-5b10-422d-9629-a5a6c0bc128a\") " pod="openstack/ovn-controller-metrics-g5gzm" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.335837 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9a1cd27-5b10-422d-9629-a5a6c0bc128a-config\") pod \"ovn-controller-metrics-g5gzm\" (UID: \"b9a1cd27-5b10-422d-9629-a5a6c0bc128a\") " pod="openstack/ovn-controller-metrics-g5gzm" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.337456 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9a1cd27-5b10-422d-9629-a5a6c0bc128a-combined-ca-bundle\") pod \"ovn-controller-metrics-g5gzm\" (UID: \"b9a1cd27-5b10-422d-9629-a5a6c0bc128a\") " pod="openstack/ovn-controller-metrics-g5gzm" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.344842 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9a1cd27-5b10-422d-9629-a5a6c0bc128a-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-g5gzm\" (UID: \"b9a1cd27-5b10-422d-9629-a5a6c0bc128a\") " pod="openstack/ovn-controller-metrics-g5gzm" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.355238 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9w9j\" (UniqueName: \"kubernetes.io/projected/b9a1cd27-5b10-422d-9629-a5a6c0bc128a-kube-api-access-b9w9j\") pod \"ovn-controller-metrics-g5gzm\" (UID: \"b9a1cd27-5b10-422d-9629-a5a6c0bc128a\") " pod="openstack/ovn-controller-metrics-g5gzm" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.369698 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-g5gzm" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.435880 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-sg87d\" (UID: \"2969d39b-717f-4198-a9d9-e0a0723f20f8\") " pod="openstack/dnsmasq-dns-8554648995-sg87d" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.435964 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4e2efef0-3880-4d7a-bd93-59b596e470b8-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"4e2efef0-3880-4d7a-bd93-59b596e470b8\") " pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.436031 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e2efef0-3880-4d7a-bd93-59b596e470b8-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"4e2efef0-3880-4d7a-bd93-59b596e470b8\") " pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.436060 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gw4qh\" (UniqueName: \"kubernetes.io/projected/2969d39b-717f-4198-a9d9-e0a0723f20f8-kube-api-access-gw4qh\") pod \"dnsmasq-dns-8554648995-sg87d\" (UID: \"2969d39b-717f-4198-a9d9-e0a0723f20f8\") " pod="openstack/dnsmasq-dns-8554648995-sg87d" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.436109 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-sg87d\" (UID: \"2969d39b-717f-4198-a9d9-e0a0723f20f8\") " pod="openstack/dnsmasq-dns-8554648995-sg87d" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.436140 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-dns-svc\") pod \"dnsmasq-dns-8554648995-sg87d\" (UID: \"2969d39b-717f-4198-a9d9-e0a0723f20f8\") " pod="openstack/dnsmasq-dns-8554648995-sg87d" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.436163 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e2efef0-3880-4d7a-bd93-59b596e470b8-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"4e2efef0-3880-4d7a-bd93-59b596e470b8\") " pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.436202 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4e2efef0-3880-4d7a-bd93-59b596e470b8-scripts\") pod \"ovn-northd-0\" (UID: \"4e2efef0-3880-4d7a-bd93-59b596e470b8\") " pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.436230 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-config\") pod \"dnsmasq-dns-8554648995-sg87d\" (UID: \"2969d39b-717f-4198-a9d9-e0a0723f20f8\") " pod="openstack/dnsmasq-dns-8554648995-sg87d" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.436250 4687 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e2efef0-3880-4d7a-bd93-59b596e470b8-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"4e2efef0-3880-4d7a-bd93-59b596e470b8\") " pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.436279 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e2efef0-3880-4d7a-bd93-59b596e470b8-config\") pod \"ovn-northd-0\" (UID: \"4e2efef0-3880-4d7a-bd93-59b596e470b8\") " pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.436305 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrjns\" (UniqueName: \"kubernetes.io/projected/4e2efef0-3880-4d7a-bd93-59b596e470b8-kube-api-access-mrjns\") pod \"ovn-northd-0\" (UID: \"4e2efef0-3880-4d7a-bd93-59b596e470b8\") " pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.436547 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4e2efef0-3880-4d7a-bd93-59b596e470b8-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"4e2efef0-3880-4d7a-bd93-59b596e470b8\") " pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.437008 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-sg87d\" (UID: \"2969d39b-717f-4198-a9d9-e0a0723f20f8\") " pod="openstack/dnsmasq-dns-8554648995-sg87d" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.437548 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4e2efef0-3880-4d7a-bd93-59b596e470b8-scripts\") pod \"ovn-northd-0\" (UID: \"4e2efef0-3880-4d7a-bd93-59b596e470b8\") " pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.437876 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-dns-svc\") pod \"dnsmasq-dns-8554648995-sg87d\" (UID: \"2969d39b-717f-4198-a9d9-e0a0723f20f8\") " pod="openstack/dnsmasq-dns-8554648995-sg87d" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.438132 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-sg87d\" (UID: \"2969d39b-717f-4198-a9d9-e0a0723f20f8\") " pod="openstack/dnsmasq-dns-8554648995-sg87d" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.438212 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-config\") pod \"dnsmasq-dns-8554648995-sg87d\" (UID: \"2969d39b-717f-4198-a9d9-e0a0723f20f8\") " pod="openstack/dnsmasq-dns-8554648995-sg87d" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.438233 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e2efef0-3880-4d7a-bd93-59b596e470b8-config\") pod \"ovn-northd-0\" (UID: \"4e2efef0-3880-4d7a-bd93-59b596e470b8\") " pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.441208 4687 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e2efef0-3880-4d7a-bd93-59b596e470b8-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"4e2efef0-3880-4d7a-bd93-59b596e470b8\") " pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.445809 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e2efef0-3880-4d7a-bd93-59b596e470b8-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"4e2efef0-3880-4d7a-bd93-59b596e470b8\") " pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.455459 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e2efef0-3880-4d7a-bd93-59b596e470b8-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"4e2efef0-3880-4d7a-bd93-59b596e470b8\") " pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.459312 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrjns\" (UniqueName: \"kubernetes.io/projected/4e2efef0-3880-4d7a-bd93-59b596e470b8-kube-api-access-mrjns\") pod \"ovn-northd-0\" (UID: \"4e2efef0-3880-4d7a-bd93-59b596e470b8\") " pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.470109 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gw4qh\" (UniqueName: \"kubernetes.io/projected/2969d39b-717f-4198-a9d9-e0a0723f20f8-kube-api-access-gw4qh\") pod \"dnsmasq-dns-8554648995-sg87d\" (UID: \"2969d39b-717f-4198-a9d9-e0a0723f20f8\") " pod="openstack/dnsmasq-dns-8554648995-sg87d" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.495245 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-sg87d" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.521320 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.635309 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-wch8m" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.646767 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-wch8m" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.740571 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7z2pg\" (UniqueName: \"kubernetes.io/projected/e77f6887-5c89-4fef-a0c6-8a6ead617082-kube-api-access-7z2pg\") pod \"e77f6887-5c89-4fef-a0c6-8a6ead617082\" (UID: \"e77f6887-5c89-4fef-a0c6-8a6ead617082\") " Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.740658 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e77f6887-5c89-4fef-a0c6-8a6ead617082-config\") pod \"e77f6887-5c89-4fef-a0c6-8a6ead617082\" (UID: \"e77f6887-5c89-4fef-a0c6-8a6ead617082\") " Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.740767 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e77f6887-5c89-4fef-a0c6-8a6ead617082-dns-svc\") pod \"e77f6887-5c89-4fef-a0c6-8a6ead617082\" (UID: \"e77f6887-5c89-4fef-a0c6-8a6ead617082\") " Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.740901 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e77f6887-5c89-4fef-a0c6-8a6ead617082-ovsdbserver-nb\") pod \"e77f6887-5c89-4fef-a0c6-8a6ead617082\" (UID: \"e77f6887-5c89-4fef-a0c6-8a6ead617082\") " Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.741068 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e77f6887-5c89-4fef-a0c6-8a6ead617082-config" (OuterVolumeSpecName: "config") pod "e77f6887-5c89-4fef-a0c6-8a6ead617082" (UID: "e77f6887-5c89-4fef-a0c6-8a6ead617082"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.741132 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e77f6887-5c89-4fef-a0c6-8a6ead617082-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e77f6887-5c89-4fef-a0c6-8a6ead617082" (UID: "e77f6887-5c89-4fef-a0c6-8a6ead617082"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.741698 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e77f6887-5c89-4fef-a0c6-8a6ead617082-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.741712 4687 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e77f6887-5c89-4fef-a0c6-8a6ead617082-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.742395 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e77f6887-5c89-4fef-a0c6-8a6ead617082-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e77f6887-5c89-4fef-a0c6-8a6ead617082" (UID: "e77f6887-5c89-4fef-a0c6-8a6ead617082"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.748122 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e77f6887-5c89-4fef-a0c6-8a6ead617082-kube-api-access-7z2pg" (OuterVolumeSpecName: "kube-api-access-7z2pg") pod "e77f6887-5c89-4fef-a0c6-8a6ead617082" (UID: "e77f6887-5c89-4fef-a0c6-8a6ead617082"). InnerVolumeSpecName "kube-api-access-7z2pg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.838697 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-g5gzm"] Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.843541 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e77f6887-5c89-4fef-a0c6-8a6ead617082-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.843575 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7z2pg\" (UniqueName: \"kubernetes.io/projected/e77f6887-5c89-4fef-a0c6-8a6ead617082-kube-api-access-7z2pg\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.862282 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.951715 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-sg87d"] Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.983980 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-tk9nm"] Nov 25 09:21:38 crc kubenswrapper[4687]: I1125 09:21:38.985201 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.004706 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-tk9nm"] Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.032137 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-sg87d"] Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.098905 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.152411 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smm5m\" (UniqueName: \"kubernetes.io/projected/e3000b5f-af25-46df-9c97-42da2552090b-kube-api-access-smm5m\") pod \"dnsmasq-dns-b8fbc5445-tk9nm\" (UID: \"e3000b5f-af25-46df-9c97-42da2552090b\") " pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.152458 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-config\") pod \"dnsmasq-dns-b8fbc5445-tk9nm\" (UID: \"e3000b5f-af25-46df-9c97-42da2552090b\") " pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.152492 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-tk9nm\" (UID: \"e3000b5f-af25-46df-9c97-42da2552090b\") " pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.152574 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-tk9nm\" (UID: \"e3000b5f-af25-46df-9c97-42da2552090b\") " pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.152593 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-tk9nm\" (UID: \"e3000b5f-af25-46df-9c97-42da2552090b\") " pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.255280 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smm5m\" (UniqueName: \"kubernetes.io/projected/e3000b5f-af25-46df-9c97-42da2552090b-kube-api-access-smm5m\") pod \"dnsmasq-dns-b8fbc5445-tk9nm\" (UID: \"e3000b5f-af25-46df-9c97-42da2552090b\") " pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.255797 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-config\") pod \"dnsmasq-dns-b8fbc5445-tk9nm\" (UID: \"e3000b5f-af25-46df-9c97-42da2552090b\") " pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.255857 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-tk9nm\" (UID: \"e3000b5f-af25-46df-9c97-42da2552090b\") " pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.255970 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-tk9nm\" (UID: \"e3000b5f-af25-46df-9c97-42da2552090b\") " pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.256016 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-tk9nm\" (UID: \"e3000b5f-af25-46df-9c97-42da2552090b\") " pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.258121 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-tk9nm\" (UID: \"e3000b5f-af25-46df-9c97-42da2552090b\") " pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.258748 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-tk9nm\" (UID: \"e3000b5f-af25-46df-9c97-42da2552090b\") " pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.259787 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-tk9nm\" (UID: \"e3000b5f-af25-46df-9c97-42da2552090b\") " pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.260046 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-config\") pod \"dnsmasq-dns-b8fbc5445-tk9nm\" (UID: \"e3000b5f-af25-46df-9c97-42da2552090b\") " pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.277213 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smm5m\" (UniqueName: \"kubernetes.io/projected/e3000b5f-af25-46df-9c97-42da2552090b-kube-api-access-smm5m\") pod \"dnsmasq-dns-b8fbc5445-tk9nm\" (UID: \"e3000b5f-af25-46df-9c97-42da2552090b\") " pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.312399 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.644474 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-g5gzm" event={"ID":"b9a1cd27-5b10-422d-9629-a5a6c0bc128a","Type":"ContainerStarted","Data":"b9cb5886c729d3f9fd77aa37309af246ff1fedbf08e3d551149cf56295f52829"} Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.644797 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-g5gzm" event={"ID":"b9a1cd27-5b10-422d-9629-a5a6c0bc128a","Type":"ContainerStarted","Data":"42c6b0b68669b89f8ebdb173787e0c4f1956c87c0d1e57f503bfaa845776cd2f"} Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.647010 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"4e2efef0-3880-4d7a-bd93-59b596e470b8","Type":"ContainerStarted","Data":"aee531c52a5b9b4f2c250120d4e3cd47d252764be7056ec2965361f7608d7633"} Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.648640 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-sg87d" event={"ID":"2969d39b-717f-4198-a9d9-e0a0723f20f8","Type":"ContainerStarted","Data":"1d378017f869ea4d28bf9f98b689205805ec3baecc5a570fb46a31b381031ea1"} Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.648874 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-wch8m" Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.701176 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-wch8m"] Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.706543 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-wch8m"] Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.746057 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e77f6887-5c89-4fef-a0c6-8a6ead617082" path="/var/lib/kubelet/pods/e77f6887-5c89-4fef-a0c6-8a6ead617082/volumes" Nov 25 09:21:39 crc kubenswrapper[4687]: I1125 09:21:39.792711 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-tk9nm"] Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.053693 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.059263 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.061658 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-kln7l" Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.061681 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.061840 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.065358 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.077287 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.178375 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2q22\" (UniqueName: \"kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-kube-api-access-p2q22\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") " pod="openstack/swift-storage-0" Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.178450 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-etc-swift\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") " pod="openstack/swift-storage-0" Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.178493 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") " pod="openstack/swift-storage-0" Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.178722 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-lock\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") " pod="openstack/swift-storage-0" Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.178803 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-cache\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") " pod="openstack/swift-storage-0" Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.279836 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2q22\" (UniqueName: \"kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-kube-api-access-p2q22\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") " pod="openstack/swift-storage-0" Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.279891 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-etc-swift\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") " pod="openstack/swift-storage-0" Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.279926 4687 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") " pod="openstack/swift-storage-0" Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.279946 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-lock\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") " pod="openstack/swift-storage-0" Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.279967 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-cache\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") " pod="openstack/swift-storage-0" Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.280443 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-cache\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") " pod="openstack/swift-storage-0" Nov 25 09:21:40 crc kubenswrapper[4687]: E1125 09:21:40.280801 4687 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 09:21:40 crc kubenswrapper[4687]: E1125 09:21:40.280819 4687 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 09:21:40 crc kubenswrapper[4687]: E1125 09:21:40.280852 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-etc-swift podName:1f6a5d97-063d-47e3-b049-fd2b9b46ee77 nodeName:}" failed. No retries permitted until 2025-11-25 09:21:40.780837669 +0000 UTC m=+1095.834477387 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-etc-swift") pod "swift-storage-0" (UID: "1f6a5d97-063d-47e3-b049-fd2b9b46ee77") : configmap "swift-ring-files" not found Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.281185 4687 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/swift-storage-0" Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.286317 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-lock\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") " pod="openstack/swift-storage-0" Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.300025 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p2q22\" (UniqueName: \"kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-kube-api-access-p2q22\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") " pod="openstack/swift-storage-0" Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.304534 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") " pod="openstack/swift-storage-0" Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.656974 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" event={"ID":"e3000b5f-af25-46df-9c97-42da2552090b","Type":"ContainerStarted","Data":"3063516fd0ab1c82c3176245c2335fe4566aa336399e6711de80eda557bf292a"} Nov 25 09:21:40 crc kubenswrapper[4687]: I1125 09:21:40.786877 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-etc-swift\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") " pod="openstack/swift-storage-0" Nov 25 09:21:40 crc kubenswrapper[4687]: E1125 09:21:40.787083 4687 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 09:21:40 crc kubenswrapper[4687]: E1125 09:21:40.787241 4687 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 09:21:40 crc kubenswrapper[4687]: E1125 09:21:40.787291 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-etc-swift podName:1f6a5d97-063d-47e3-b049-fd2b9b46ee77 nodeName:}" failed. No retries permitted until 2025-11-25 09:21:41.787275941 +0000 UTC m=+1096.840915659 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-etc-swift") pod "swift-storage-0" (UID: "1f6a5d97-063d-47e3-b049-fd2b9b46ee77") : configmap "swift-ring-files" not found
Nov 25 09:21:41 crc kubenswrapper[4687]: I1125 09:21:41.804265 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-etc-swift\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") " pod="openstack/swift-storage-0"
Nov 25 09:21:41 crc kubenswrapper[4687]: E1125 09:21:41.804581 4687 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 25 09:21:41 crc kubenswrapper[4687]: E1125 09:21:41.805169 4687 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 25 09:21:41 crc kubenswrapper[4687]: E1125 09:21:41.805232 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-etc-swift podName:1f6a5d97-063d-47e3-b049-fd2b9b46ee77 nodeName:}" failed. No retries permitted until 2025-11-25 09:21:43.805213424 +0000 UTC m=+1098.858853152 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-etc-swift") pod "swift-storage-0" (UID: "1f6a5d97-063d-47e3-b049-fd2b9b46ee77") : configmap "swift-ring-files" not found
Nov 25 09:21:42 crc kubenswrapper[4687]: I1125 09:21:42.672970 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-sg87d" event={"ID":"2969d39b-717f-4198-a9d9-e0a0723f20f8","Type":"ContainerStarted","Data":"18ab3e194c9e224af36a8e6fd29485709d0b759dcb89f19b2d49bfd17931fa9c"}
Nov 25 09:21:43 crc kubenswrapper[4687]: I1125 09:21:43.846681 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-etc-swift\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") " pod="openstack/swift-storage-0"
Nov 25 09:21:43 crc kubenswrapper[4687]: E1125 09:21:43.846854 4687 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 25 09:21:43 crc kubenswrapper[4687]: E1125 09:21:43.846872 4687 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 25 09:21:43 crc kubenswrapper[4687]: E1125 09:21:43.846917 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-etc-swift podName:1f6a5d97-063d-47e3-b049-fd2b9b46ee77 nodeName:}" failed. No retries permitted until 2025-11-25 09:21:47.846902061 +0000 UTC m=+1102.900541779 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-etc-swift") pod "swift-storage-0" (UID: "1f6a5d97-063d-47e3-b049-fd2b9b46ee77") : configmap "swift-ring-files" not found
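
The failed etc-swift mounts above retry on a doubling schedule: the durationBeforeRetry values run 500ms (09:21:40.280), 1s (09:21:40.787), 2s (09:21:41.805), then 4s (09:21:43.846), and keep doubling until the missing "swift-ring-files" ConfigMap appears. A minimal Go sketch of that schedule follows; the initial delay is taken from the log, while the cap is an assumed placeholder (the real logic lives in the kubelet's nested pending operations exponential backoff, whose exact constants are not shown in this log).

package main

import (
	"fmt"
	"time"
)

const (
	initialDelay = 500 * time.Millisecond // first durationBeforeRetry seen in the log
	maxDelay     = 2 * time.Minute        // assumed cap, for the sketch only
)

// nextDelay returns the backoff to apply after a failed attempt:
// start at initialDelay, double on each failure, clamp at maxDelay.
func nextDelay(prev time.Duration) time.Duration {
	if prev == 0 {
		return initialDelay
	}
	if next := 2 * prev; next < maxDelay {
		return next
	}
	return maxDelay
}

func main() {
	var d time.Duration
	for attempt := 1; attempt <= 6; attempt++ {
		d = nextDelay(d)
		fmt.Printf("attempt %d failed, durationBeforeRetry %v\n", attempt, d)
	}
	// Prints 500ms, 1s, 2s, 4s, 8s, 16s; the first four match the
	// 09:21:40 through 09:21:43 entries for the etc-swift volume.
}
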
Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.023951 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-qxbhv"]
Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.025256 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-qxbhv"
Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.027307 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.027423 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts"
Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.027849 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data"
Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.044416 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-qxbhv"]
Nov 25 09:21:44 crc kubenswrapper[4687]: E1125 09:21:44.045172 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-k5t68 ring-data-devices scripts swiftconf], unattached volumes=[], failed to process volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-k5t68 ring-data-devices scripts swiftconf]: context canceled" pod="openstack/swift-ring-rebalance-qxbhv" podUID="0b065b05-f379-4928-be76-7fc716dcc1da"
Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.059262 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-qxbhv"]
Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.068319 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-7x797"]
Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.069348 4687 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.088472 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-7x797"] Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.151275 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/aee1a7b3-633b-455a-903a-7b00ef90ea07-ring-data-devices\") pod \"swift-ring-rebalance-7x797\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.151333 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/aee1a7b3-633b-455a-903a-7b00ef90ea07-etc-swift\") pod \"swift-ring-rebalance-7x797\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.151361 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0b065b05-f379-4928-be76-7fc716dcc1da-etc-swift\") pod \"swift-ring-rebalance-qxbhv\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.151384 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2hxtn\" (UniqueName: \"kubernetes.io/projected/aee1a7b3-633b-455a-903a-7b00ef90ea07-kube-api-access-2hxtn\") pod \"swift-ring-rebalance-7x797\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.151464 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/aee1a7b3-633b-455a-903a-7b00ef90ea07-scripts\") pod \"swift-ring-rebalance-7x797\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.151525 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/aee1a7b3-633b-455a-903a-7b00ef90ea07-dispersionconf\") pod \"swift-ring-rebalance-7x797\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.155824 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0b065b05-f379-4928-be76-7fc716dcc1da-ring-data-devices\") pod \"swift-ring-rebalance-qxbhv\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.155901 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5t68\" (UniqueName: \"kubernetes.io/projected/0b065b05-f379-4928-be76-7fc716dcc1da-kube-api-access-k5t68\") pod \"swift-ring-rebalance-qxbhv\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.155930 4687 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/aee1a7b3-633b-455a-903a-7b00ef90ea07-swiftconf\") pod \"swift-ring-rebalance-7x797\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.155953 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aee1a7b3-633b-455a-903a-7b00ef90ea07-combined-ca-bundle\") pod \"swift-ring-rebalance-7x797\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.155998 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b065b05-f379-4928-be76-7fc716dcc1da-combined-ca-bundle\") pod \"swift-ring-rebalance-qxbhv\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.156051 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0b065b05-f379-4928-be76-7fc716dcc1da-swiftconf\") pod \"swift-ring-rebalance-qxbhv\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.156105 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0b065b05-f379-4928-be76-7fc716dcc1da-dispersionconf\") pod \"swift-ring-rebalance-qxbhv\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.156142 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0b065b05-f379-4928-be76-7fc716dcc1da-scripts\") pod \"swift-ring-rebalance-qxbhv\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.257479 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b065b05-f379-4928-be76-7fc716dcc1da-combined-ca-bundle\") pod \"swift-ring-rebalance-qxbhv\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.257891 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0b065b05-f379-4928-be76-7fc716dcc1da-swiftconf\") pod \"swift-ring-rebalance-qxbhv\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.258586 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0b065b05-f379-4928-be76-7fc716dcc1da-dispersionconf\") pod \"swift-ring-rebalance-qxbhv\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.258735 4687 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0b065b05-f379-4928-be76-7fc716dcc1da-scripts\") pod \"swift-ring-rebalance-qxbhv\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.258928 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/aee1a7b3-633b-455a-903a-7b00ef90ea07-ring-data-devices\") pod \"swift-ring-rebalance-7x797\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.259044 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/aee1a7b3-633b-455a-903a-7b00ef90ea07-etc-swift\") pod \"swift-ring-rebalance-7x797\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.259156 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0b065b05-f379-4928-be76-7fc716dcc1da-etc-swift\") pod \"swift-ring-rebalance-qxbhv\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.259263 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2hxtn\" (UniqueName: \"kubernetes.io/projected/aee1a7b3-633b-455a-903a-7b00ef90ea07-kube-api-access-2hxtn\") pod \"swift-ring-rebalance-7x797\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.259371 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/aee1a7b3-633b-455a-903a-7b00ef90ea07-scripts\") pod \"swift-ring-rebalance-7x797\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.259483 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/aee1a7b3-633b-455a-903a-7b00ef90ea07-dispersionconf\") pod \"swift-ring-rebalance-7x797\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.259632 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0b065b05-f379-4928-be76-7fc716dcc1da-ring-data-devices\") pod \"swift-ring-rebalance-qxbhv\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.259754 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5t68\" (UniqueName: \"kubernetes.io/projected/0b065b05-f379-4928-be76-7fc716dcc1da-kube-api-access-k5t68\") pod \"swift-ring-rebalance-qxbhv\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.259855 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" 
(UniqueName: \"kubernetes.io/secret/aee1a7b3-633b-455a-903a-7b00ef90ea07-swiftconf\") pod \"swift-ring-rebalance-7x797\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.259961 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aee1a7b3-633b-455a-903a-7b00ef90ea07-combined-ca-bundle\") pod \"swift-ring-rebalance-7x797\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.265402 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0b065b05-f379-4928-be76-7fc716dcc1da-ring-data-devices\") pod \"swift-ring-rebalance-qxbhv\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.265707 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0b065b05-f379-4928-be76-7fc716dcc1da-swiftconf\") pod \"swift-ring-rebalance-qxbhv\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.266077 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/aee1a7b3-633b-455a-903a-7b00ef90ea07-scripts\") pod \"swift-ring-rebalance-7x797\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.266194 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0b065b05-f379-4928-be76-7fc716dcc1da-etc-swift\") pod \"swift-ring-rebalance-qxbhv\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.266220 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0b065b05-f379-4928-be76-7fc716dcc1da-scripts\") pod \"swift-ring-rebalance-qxbhv\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.266350 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/aee1a7b3-633b-455a-903a-7b00ef90ea07-etc-swift\") pod \"swift-ring-rebalance-7x797\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.273099 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/aee1a7b3-633b-455a-903a-7b00ef90ea07-ring-data-devices\") pod \"swift-ring-rebalance-7x797\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.273339 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/aee1a7b3-633b-455a-903a-7b00ef90ea07-dispersionconf\") pod \"swift-ring-rebalance-7x797\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " 
pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.273917 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/aee1a7b3-633b-455a-903a-7b00ef90ea07-swiftconf\") pod \"swift-ring-rebalance-7x797\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.274555 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b065b05-f379-4928-be76-7fc716dcc1da-combined-ca-bundle\") pod \"swift-ring-rebalance-qxbhv\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.274822 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0b065b05-f379-4928-be76-7fc716dcc1da-dispersionconf\") pod \"swift-ring-rebalance-qxbhv\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.275324 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aee1a7b3-633b-455a-903a-7b00ef90ea07-combined-ca-bundle\") pod \"swift-ring-rebalance-7x797\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.293382 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2hxtn\" (UniqueName: \"kubernetes.io/projected/aee1a7b3-633b-455a-903a-7b00ef90ea07-kube-api-access-2hxtn\") pod \"swift-ring-rebalance-7x797\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.302246 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5t68\" (UniqueName: \"kubernetes.io/projected/0b065b05-f379-4928-be76-7fc716dcc1da-kube-api-access-k5t68\") pod \"swift-ring-rebalance-qxbhv\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.386895 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.689048 4687 generic.go:334] "Generic (PLEG): container finished" podID="e3000b5f-af25-46df-9c97-42da2552090b" containerID="995a538132398c4fa35d58c6a5a4c4ed9ccbbc20e702e847c87aba05708147e9" exitCode=0 Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.689149 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" event={"ID":"e3000b5f-af25-46df-9c97-42da2552090b","Type":"ContainerDied","Data":"995a538132398c4fa35d58c6a5a4c4ed9ccbbc20e702e847c87aba05708147e9"} Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.691614 4687 generic.go:334] "Generic (PLEG): container finished" podID="2969d39b-717f-4198-a9d9-e0a0723f20f8" containerID="18ab3e194c9e224af36a8e6fd29485709d0b759dcb89f19b2d49bfd17931fa9c" exitCode=0 Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.691683 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.691815 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-sg87d" event={"ID":"2969d39b-717f-4198-a9d9-e0a0723f20f8","Type":"ContainerDied","Data":"18ab3e194c9e224af36a8e6fd29485709d0b759dcb89f19b2d49bfd17931fa9c"} Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.707475 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.736334 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-g5gzm" podStartSLOduration=6.736314998 podStartE2EDuration="6.736314998s" podCreationTimestamp="2025-11-25 09:21:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:21:44.726753537 +0000 UTC m=+1099.780393275" watchObservedRunningTime="2025-11-25 09:21:44.736314998 +0000 UTC m=+1099.789954716" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.851320 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-7x797"] Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.876114 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k5t68\" (UniqueName: \"kubernetes.io/projected/0b065b05-f379-4928-be76-7fc716dcc1da-kube-api-access-k5t68\") pod \"0b065b05-f379-4928-be76-7fc716dcc1da\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.876196 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0b065b05-f379-4928-be76-7fc716dcc1da-dispersionconf\") pod \"0b065b05-f379-4928-be76-7fc716dcc1da\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.876240 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0b065b05-f379-4928-be76-7fc716dcc1da-swiftconf\") pod \"0b065b05-f379-4928-be76-7fc716dcc1da\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.876283 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0b065b05-f379-4928-be76-7fc716dcc1da-scripts\") pod \"0b065b05-f379-4928-be76-7fc716dcc1da\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.876314 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0b065b05-f379-4928-be76-7fc716dcc1da-ring-data-devices\") pod \"0b065b05-f379-4928-be76-7fc716dcc1da\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.876419 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b065b05-f379-4928-be76-7fc716dcc1da-combined-ca-bundle\") pod \"0b065b05-f379-4928-be76-7fc716dcc1da\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.876527 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0b065b05-f379-4928-be76-7fc716dcc1da-etc-swift\") pod \"0b065b05-f379-4928-be76-7fc716dcc1da\" (UID: \"0b065b05-f379-4928-be76-7fc716dcc1da\") " Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.876868 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b065b05-f379-4928-be76-7fc716dcc1da-scripts" (OuterVolumeSpecName: "scripts") pod "0b065b05-f379-4928-be76-7fc716dcc1da" (UID: "0b065b05-f379-4928-be76-7fc716dcc1da"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.876925 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b065b05-f379-4928-be76-7fc716dcc1da-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "0b065b05-f379-4928-be76-7fc716dcc1da" (UID: "0b065b05-f379-4928-be76-7fc716dcc1da"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.877160 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b065b05-f379-4928-be76-7fc716dcc1da-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "0b065b05-f379-4928-be76-7fc716dcc1da" (UID: "0b065b05-f379-4928-be76-7fc716dcc1da"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.878235 4687 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0b065b05-f379-4928-be76-7fc716dcc1da-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.878262 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0b065b05-f379-4928-be76-7fc716dcc1da-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.879540 4687 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0b065b05-f379-4928-be76-7fc716dcc1da-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.889103 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b065b05-f379-4928-be76-7fc716dcc1da-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "0b065b05-f379-4928-be76-7fc716dcc1da" (UID: "0b065b05-f379-4928-be76-7fc716dcc1da"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.892982 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b065b05-f379-4928-be76-7fc716dcc1da-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "0b065b05-f379-4928-be76-7fc716dcc1da" (UID: "0b065b05-f379-4928-be76-7fc716dcc1da"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.893840 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b065b05-f379-4928-be76-7fc716dcc1da-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0b065b05-f379-4928-be76-7fc716dcc1da" (UID: "0b065b05-f379-4928-be76-7fc716dcc1da"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.912235 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b065b05-f379-4928-be76-7fc716dcc1da-kube-api-access-k5t68" (OuterVolumeSpecName: "kube-api-access-k5t68") pod "0b065b05-f379-4928-be76-7fc716dcc1da" (UID: "0b065b05-f379-4928-be76-7fc716dcc1da"). InnerVolumeSpecName "kube-api-access-k5t68". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.986995 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b065b05-f379-4928-be76-7fc716dcc1da-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.987042 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k5t68\" (UniqueName: \"kubernetes.io/projected/0b065b05-f379-4928-be76-7fc716dcc1da-kube-api-access-k5t68\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.987058 4687 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0b065b05-f379-4928-be76-7fc716dcc1da-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:44 crc kubenswrapper[4687]: I1125 09:21:44.987076 4687 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0b065b05-f379-4928-be76-7fc716dcc1da-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:45 crc kubenswrapper[4687]: W1125 09:21:45.050897 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaee1a7b3_633b_455a_903a_7b00ef90ea07.slice/crio-e7b9f79e7e3e373e070fc7587d11db58a86e400cb8967fe4ee64c0a92b35ec71 WatchSource:0}: Error finding container e7b9f79e7e3e373e070fc7587d11db58a86e400cb8967fe4ee64c0a92b35ec71: Status 404 returned error can't find the container with id e7b9f79e7e3e373e070fc7587d11db58a86e400cb8967fe4ee64c0a92b35ec71 Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.063100 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-sg87d" Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.190446 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-ovsdbserver-nb\") pod \"2969d39b-717f-4198-a9d9-e0a0723f20f8\" (UID: \"2969d39b-717f-4198-a9d9-e0a0723f20f8\") " Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.190636 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-config\") pod \"2969d39b-717f-4198-a9d9-e0a0723f20f8\" (UID: \"2969d39b-717f-4198-a9d9-e0a0723f20f8\") " Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.190740 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gw4qh\" (UniqueName: \"kubernetes.io/projected/2969d39b-717f-4198-a9d9-e0a0723f20f8-kube-api-access-gw4qh\") pod \"2969d39b-717f-4198-a9d9-e0a0723f20f8\" (UID: \"2969d39b-717f-4198-a9d9-e0a0723f20f8\") " Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.190781 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-ovsdbserver-sb\") pod \"2969d39b-717f-4198-a9d9-e0a0723f20f8\" (UID: \"2969d39b-717f-4198-a9d9-e0a0723f20f8\") " Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.190809 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-dns-svc\") pod \"2969d39b-717f-4198-a9d9-e0a0723f20f8\" (UID: \"2969d39b-717f-4198-a9d9-e0a0723f20f8\") " Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.201866 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2969d39b-717f-4198-a9d9-e0a0723f20f8-kube-api-access-gw4qh" (OuterVolumeSpecName: "kube-api-access-gw4qh") pod "2969d39b-717f-4198-a9d9-e0a0723f20f8" (UID: "2969d39b-717f-4198-a9d9-e0a0723f20f8"). InnerVolumeSpecName "kube-api-access-gw4qh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.215057 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2969d39b-717f-4198-a9d9-e0a0723f20f8" (UID: "2969d39b-717f-4198-a9d9-e0a0723f20f8"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.218356 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2969d39b-717f-4198-a9d9-e0a0723f20f8" (UID: "2969d39b-717f-4198-a9d9-e0a0723f20f8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.220193 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-config" (OuterVolumeSpecName: "config") pod "2969d39b-717f-4198-a9d9-e0a0723f20f8" (UID: "2969d39b-717f-4198-a9d9-e0a0723f20f8"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.237441 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2969d39b-717f-4198-a9d9-e0a0723f20f8" (UID: "2969d39b-717f-4198-a9d9-e0a0723f20f8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.245243 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.245458 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.292762 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gw4qh\" (UniqueName: \"kubernetes.io/projected/2969d39b-717f-4198-a9d9-e0a0723f20f8-kube-api-access-gw4qh\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.292794 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.292807 4687 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.292819 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.292831 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2969d39b-717f-4198-a9d9-e0a0723f20f8-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.329394 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.703383 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-sg87d" Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.703352 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-sg87d" event={"ID":"2969d39b-717f-4198-a9d9-e0a0723f20f8","Type":"ContainerDied","Data":"1d378017f869ea4d28bf9f98b689205805ec3baecc5a570fb46a31b381031ea1"} Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.703968 4687 scope.go:117] "RemoveContainer" containerID="18ab3e194c9e224af36a8e6fd29485709d0b759dcb89f19b2d49bfd17931fa9c" Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.705332 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" event={"ID":"e3000b5f-af25-46df-9c97-42da2552090b","Type":"ContainerStarted","Data":"0d37246c20425f7f23148c6b8bf89892a5f546115351c342cf9c5ad1962e69b9"} Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.705679 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.708808 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-7x797" event={"ID":"aee1a7b3-633b-455a-903a-7b00ef90ea07","Type":"ContainerStarted","Data":"e7b9f79e7e3e373e070fc7587d11db58a86e400cb8967fe4ee64c0a92b35ec71"} Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.712406 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"4e2efef0-3880-4d7a-bd93-59b596e470b8","Type":"ContainerStarted","Data":"aae78a7db215234bdba87b307956f743795b42adeda1ebcac731518af22fdd17"} Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.712532 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"4e2efef0-3880-4d7a-bd93-59b596e470b8","Type":"ContainerStarted","Data":"1d40e7d5e5442eedd6e960606cdb9eefbf1a7c7f57e41c875e245594c8ecd39a"} Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.712544 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-qxbhv" Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.739269 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" podStartSLOduration=7.739252111 podStartE2EDuration="7.739252111s" podCreationTimestamp="2025-11-25 09:21:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:21:45.735274652 +0000 UTC m=+1100.788914380" watchObservedRunningTime="2025-11-25 09:21:45.739252111 +0000 UTC m=+1100.792891819" Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.769733 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=1.800679185 podStartE2EDuration="7.769711744s" podCreationTimestamp="2025-11-25 09:21:38 +0000 UTC" firstStartedPulling="2025-11-25 09:21:39.128762577 +0000 UTC m=+1094.182402295" lastFinishedPulling="2025-11-25 09:21:45.097795136 +0000 UTC m=+1100.151434854" observedRunningTime="2025-11-25 09:21:45.760845652 +0000 UTC m=+1100.814485370" watchObservedRunningTime="2025-11-25 09:21:45.769711744 +0000 UTC m=+1100.823351472" Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.803419 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.836359 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-sg87d"] Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.854831 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-sg87d"] Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.870548 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-qxbhv"] Nov 25 09:21:45 crc kubenswrapper[4687]: I1125 09:21:45.876703 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-qxbhv"] Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.651170 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-rf7rz"] Nov 25 09:21:46 crc kubenswrapper[4687]: E1125 09:21:46.651879 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2969d39b-717f-4198-a9d9-e0a0723f20f8" containerName="init" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.651897 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="2969d39b-717f-4198-a9d9-e0a0723f20f8" containerName="init" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.652101 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="2969d39b-717f-4198-a9d9-e0a0723f20f8" containerName="init" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.652752 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-rf7rz" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.661093 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bd1e-account-create-5r2nk"] Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.662232 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bd1e-account-create-5r2nk" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.669036 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-rf7rz"] Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.683454 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.693014 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bd1e-account-create-5r2nk"] Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.737877 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.785636 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.832402 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3612478-e4e2-49bb-aecc-1eb23c44975b-operator-scripts\") pod \"keystone-db-create-rf7rz\" (UID: \"d3612478-e4e2-49bb-aecc-1eb23c44975b\") " pod="openstack/keystone-db-create-rf7rz" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.832463 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27fd9468-4e1f-4cc9-9711-6e378f4236ec-operator-scripts\") pod \"keystone-bd1e-account-create-5r2nk\" (UID: \"27fd9468-4e1f-4cc9-9711-6e378f4236ec\") " pod="openstack/keystone-bd1e-account-create-5r2nk" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.832647 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpfv4\" (UniqueName: \"kubernetes.io/projected/d3612478-e4e2-49bb-aecc-1eb23c44975b-kube-api-access-mpfv4\") pod \"keystone-db-create-rf7rz\" (UID: \"d3612478-e4e2-49bb-aecc-1eb23c44975b\") " pod="openstack/keystone-db-create-rf7rz" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.832984 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8tb8\" (UniqueName: \"kubernetes.io/projected/27fd9468-4e1f-4cc9-9711-6e378f4236ec-kube-api-access-q8tb8\") pod \"keystone-bd1e-account-create-5r2nk\" (UID: \"27fd9468-4e1f-4cc9-9711-6e378f4236ec\") " pod="openstack/keystone-bd1e-account-create-5r2nk" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.867672 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-cnhz2"] Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.868938 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-cnhz2" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.888900 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-cnhz2"] Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.894957 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-5efa-account-create-fz9hn"] Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.895997 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-5efa-account-create-fz9hn" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.898695 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.902790 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5efa-account-create-fz9hn"] Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.915766 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.934483 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8tb8\" (UniqueName: \"kubernetes.io/projected/27fd9468-4e1f-4cc9-9711-6e378f4236ec-kube-api-access-q8tb8\") pod \"keystone-bd1e-account-create-5r2nk\" (UID: \"27fd9468-4e1f-4cc9-9711-6e378f4236ec\") " pod="openstack/keystone-bd1e-account-create-5r2nk" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.934622 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3612478-e4e2-49bb-aecc-1eb23c44975b-operator-scripts\") pod \"keystone-db-create-rf7rz\" (UID: \"d3612478-e4e2-49bb-aecc-1eb23c44975b\") " pod="openstack/keystone-db-create-rf7rz" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.934645 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27fd9468-4e1f-4cc9-9711-6e378f4236ec-operator-scripts\") pod \"keystone-bd1e-account-create-5r2nk\" (UID: \"27fd9468-4e1f-4cc9-9711-6e378f4236ec\") " pod="openstack/keystone-bd1e-account-create-5r2nk" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.934716 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpfv4\" (UniqueName: \"kubernetes.io/projected/d3612478-e4e2-49bb-aecc-1eb23c44975b-kube-api-access-mpfv4\") pod \"keystone-db-create-rf7rz\" (UID: \"d3612478-e4e2-49bb-aecc-1eb23c44975b\") " pod="openstack/keystone-db-create-rf7rz" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.935679 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3612478-e4e2-49bb-aecc-1eb23c44975b-operator-scripts\") pod \"keystone-db-create-rf7rz\" (UID: \"d3612478-e4e2-49bb-aecc-1eb23c44975b\") " pod="openstack/keystone-db-create-rf7rz" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.936258 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27fd9468-4e1f-4cc9-9711-6e378f4236ec-operator-scripts\") pod \"keystone-bd1e-account-create-5r2nk\" (UID: \"27fd9468-4e1f-4cc9-9711-6e378f4236ec\") " pod="openstack/keystone-bd1e-account-create-5r2nk" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.958971 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8tb8\" (UniqueName: \"kubernetes.io/projected/27fd9468-4e1f-4cc9-9711-6e378f4236ec-kube-api-access-q8tb8\") pod \"keystone-bd1e-account-create-5r2nk\" (UID: \"27fd9468-4e1f-4cc9-9711-6e378f4236ec\") " pod="openstack/keystone-bd1e-account-create-5r2nk" Nov 25 09:21:46 crc kubenswrapper[4687]: I1125 09:21:46.961734 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpfv4\" (UniqueName: 
\"kubernetes.io/projected/d3612478-e4e2-49bb-aecc-1eb23c44975b-kube-api-access-mpfv4\") pod \"keystone-db-create-rf7rz\" (UID: \"d3612478-e4e2-49bb-aecc-1eb23c44975b\") " pod="openstack/keystone-db-create-rf7rz" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.002101 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-rf7rz" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.023454 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bd1e-account-create-5r2nk" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.036450 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4fb1585c-c72a-44ee-8614-3a84312c01ab-operator-scripts\") pod \"placement-db-create-cnhz2\" (UID: \"4fb1585c-c72a-44ee-8614-3a84312c01ab\") " pod="openstack/placement-db-create-cnhz2" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.036581 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbnjn\" (UniqueName: \"kubernetes.io/projected/070421fb-5d0d-4088-bcae-f6d19fdc21fa-kube-api-access-rbnjn\") pod \"placement-5efa-account-create-fz9hn\" (UID: \"070421fb-5d0d-4088-bcae-f6d19fdc21fa\") " pod="openstack/placement-5efa-account-create-fz9hn" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.036654 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/070421fb-5d0d-4088-bcae-f6d19fdc21fa-operator-scripts\") pod \"placement-5efa-account-create-fz9hn\" (UID: \"070421fb-5d0d-4088-bcae-f6d19fdc21fa\") " pod="openstack/placement-5efa-account-create-fz9hn" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.036690 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5sfn\" (UniqueName: \"kubernetes.io/projected/4fb1585c-c72a-44ee-8614-3a84312c01ab-kube-api-access-v5sfn\") pod \"placement-db-create-cnhz2\" (UID: \"4fb1585c-c72a-44ee-8614-3a84312c01ab\") " pod="openstack/placement-db-create-cnhz2" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.138468 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbnjn\" (UniqueName: \"kubernetes.io/projected/070421fb-5d0d-4088-bcae-f6d19fdc21fa-kube-api-access-rbnjn\") pod \"placement-5efa-account-create-fz9hn\" (UID: \"070421fb-5d0d-4088-bcae-f6d19fdc21fa\") " pod="openstack/placement-5efa-account-create-fz9hn" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.138638 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/070421fb-5d0d-4088-bcae-f6d19fdc21fa-operator-scripts\") pod \"placement-5efa-account-create-fz9hn\" (UID: \"070421fb-5d0d-4088-bcae-f6d19fdc21fa\") " pod="openstack/placement-5efa-account-create-fz9hn" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.139539 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5sfn\" (UniqueName: \"kubernetes.io/projected/4fb1585c-c72a-44ee-8614-3a84312c01ab-kube-api-access-v5sfn\") pod \"placement-db-create-cnhz2\" (UID: \"4fb1585c-c72a-44ee-8614-3a84312c01ab\") " pod="openstack/placement-db-create-cnhz2" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.139569 4687 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/070421fb-5d0d-4088-bcae-f6d19fdc21fa-operator-scripts\") pod \"placement-5efa-account-create-fz9hn\" (UID: \"070421fb-5d0d-4088-bcae-f6d19fdc21fa\") " pod="openstack/placement-5efa-account-create-fz9hn" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.139772 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4fb1585c-c72a-44ee-8614-3a84312c01ab-operator-scripts\") pod \"placement-db-create-cnhz2\" (UID: \"4fb1585c-c72a-44ee-8614-3a84312c01ab\") " pod="openstack/placement-db-create-cnhz2" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.140302 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4fb1585c-c72a-44ee-8614-3a84312c01ab-operator-scripts\") pod \"placement-db-create-cnhz2\" (UID: \"4fb1585c-c72a-44ee-8614-3a84312c01ab\") " pod="openstack/placement-db-create-cnhz2" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.160904 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbnjn\" (UniqueName: \"kubernetes.io/projected/070421fb-5d0d-4088-bcae-f6d19fdc21fa-kube-api-access-rbnjn\") pod \"placement-5efa-account-create-fz9hn\" (UID: \"070421fb-5d0d-4088-bcae-f6d19fdc21fa\") " pod="openstack/placement-5efa-account-create-fz9hn" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.163944 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5sfn\" (UniqueName: \"kubernetes.io/projected/4fb1585c-c72a-44ee-8614-3a84312c01ab-kube-api-access-v5sfn\") pod \"placement-db-create-cnhz2\" (UID: \"4fb1585c-c72a-44ee-8614-3a84312c01ab\") " pod="openstack/placement-db-create-cnhz2" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.176239 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-29bts"] Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.177590 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-29bts" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.183870 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-29bts"] Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.192889 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-cnhz2" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.212492 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5efa-account-create-fz9hn" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.263569 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-02d2-account-create-wz57w"] Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.264699 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-02d2-account-create-wz57w" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.267728 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.273972 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-02d2-account-create-wz57w"] Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.343122 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdvt4\" (UniqueName: \"kubernetes.io/projected/bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50-kube-api-access-pdvt4\") pod \"glance-02d2-account-create-wz57w\" (UID: \"bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50\") " pod="openstack/glance-02d2-account-create-wz57w" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.343205 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2370488-8d98-49cc-acc8-52f5c5c77de7-operator-scripts\") pod \"glance-db-create-29bts\" (UID: \"f2370488-8d98-49cc-acc8-52f5c5c77de7\") " pod="openstack/glance-db-create-29bts" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.343271 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50-operator-scripts\") pod \"glance-02d2-account-create-wz57w\" (UID: \"bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50\") " pod="openstack/glance-02d2-account-create-wz57w" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.343365 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qq57\" (UniqueName: \"kubernetes.io/projected/f2370488-8d98-49cc-acc8-52f5c5c77de7-kube-api-access-7qq57\") pod \"glance-db-create-29bts\" (UID: \"f2370488-8d98-49cc-acc8-52f5c5c77de7\") " pod="openstack/glance-db-create-29bts" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.444578 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdvt4\" (UniqueName: \"kubernetes.io/projected/bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50-kube-api-access-pdvt4\") pod \"glance-02d2-account-create-wz57w\" (UID: \"bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50\") " pod="openstack/glance-02d2-account-create-wz57w" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.444636 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2370488-8d98-49cc-acc8-52f5c5c77de7-operator-scripts\") pod \"glance-db-create-29bts\" (UID: \"f2370488-8d98-49cc-acc8-52f5c5c77de7\") " pod="openstack/glance-db-create-29bts" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.444664 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50-operator-scripts\") pod \"glance-02d2-account-create-wz57w\" (UID: \"bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50\") " pod="openstack/glance-02d2-account-create-wz57w" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.444698 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qq57\" (UniqueName: \"kubernetes.io/projected/f2370488-8d98-49cc-acc8-52f5c5c77de7-kube-api-access-7qq57\") pod 
\"glance-db-create-29bts\" (UID: \"f2370488-8d98-49cc-acc8-52f5c5c77de7\") " pod="openstack/glance-db-create-29bts" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.445747 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2370488-8d98-49cc-acc8-52f5c5c77de7-operator-scripts\") pod \"glance-db-create-29bts\" (UID: \"f2370488-8d98-49cc-acc8-52f5c5c77de7\") " pod="openstack/glance-db-create-29bts" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.446193 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50-operator-scripts\") pod \"glance-02d2-account-create-wz57w\" (UID: \"bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50\") " pod="openstack/glance-02d2-account-create-wz57w" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.487055 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qq57\" (UniqueName: \"kubernetes.io/projected/f2370488-8d98-49cc-acc8-52f5c5c77de7-kube-api-access-7qq57\") pod \"glance-db-create-29bts\" (UID: \"f2370488-8d98-49cc-acc8-52f5c5c77de7\") " pod="openstack/glance-db-create-29bts" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.491993 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdvt4\" (UniqueName: \"kubernetes.io/projected/bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50-kube-api-access-pdvt4\") pod \"glance-02d2-account-create-wz57w\" (UID: \"bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50\") " pod="openstack/glance-02d2-account-create-wz57w" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.554450 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-29bts" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.591099 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-02d2-account-create-wz57w" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.746198 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b065b05-f379-4928-be76-7fc716dcc1da" path="/var/lib/kubelet/pods/0b065b05-f379-4928-be76-7fc716dcc1da/volumes" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.747121 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2969d39b-717f-4198-a9d9-e0a0723f20f8" path="/var/lib/kubelet/pods/2969d39b-717f-4198-a9d9-e0a0723f20f8/volumes" Nov 25 09:21:47 crc kubenswrapper[4687]: I1125 09:21:47.850043 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-etc-swift\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") " pod="openstack/swift-storage-0" Nov 25 09:21:47 crc kubenswrapper[4687]: E1125 09:21:47.850941 4687 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 09:21:47 crc kubenswrapper[4687]: E1125 09:21:47.850969 4687 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 09:21:47 crc kubenswrapper[4687]: E1125 09:21:47.851017 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-etc-swift podName:1f6a5d97-063d-47e3-b049-fd2b9b46ee77 nodeName:}" failed. 
No retries permitted until 2025-11-25 09:21:55.850998141 +0000 UTC m=+1110.904637959 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-etc-swift") pod "swift-storage-0" (UID: "1f6a5d97-063d-47e3-b049-fd2b9b46ee77") : configmap "swift-ring-files" not found Nov 25 09:21:49 crc kubenswrapper[4687]: I1125 09:21:49.468429 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-rf7rz"] Nov 25 09:21:49 crc kubenswrapper[4687]: I1125 09:21:49.478655 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-cnhz2"] Nov 25 09:21:49 crc kubenswrapper[4687]: I1125 09:21:49.555429 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5efa-account-create-fz9hn"] Nov 25 09:21:49 crc kubenswrapper[4687]: I1125 09:21:49.571130 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-02d2-account-create-wz57w"] Nov 25 09:21:49 crc kubenswrapper[4687]: I1125 09:21:49.653618 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-29bts"] Nov 25 09:21:49 crc kubenswrapper[4687]: I1125 09:21:49.660630 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bd1e-account-create-5r2nk"] Nov 25 09:21:49 crc kubenswrapper[4687]: W1125 09:21:49.662694 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2370488_8d98_49cc_acc8_52f5c5c77de7.slice/crio-c6cc9043f57d567909a27c21c9a2912dd7a17c84345eed0f410804b93f79c026 WatchSource:0}: Error finding container c6cc9043f57d567909a27c21c9a2912dd7a17c84345eed0f410804b93f79c026: Status 404 returned error can't find the container with id c6cc9043f57d567909a27c21c9a2912dd7a17c84345eed0f410804b93f79c026 Nov 25 09:21:49 crc kubenswrapper[4687]: W1125 09:21:49.664867 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27fd9468_4e1f_4cc9_9711_6e378f4236ec.slice/crio-a5661e6bfb2ba652878c6d95ad076886b7fc77b53ea7d705fa26ab84c68df274 WatchSource:0}: Error finding container a5661e6bfb2ba652878c6d95ad076886b7fc77b53ea7d705fa26ab84c68df274: Status 404 returned error can't find the container with id a5661e6bfb2ba652878c6d95ad076886b7fc77b53ea7d705fa26ab84c68df274 Nov 25 09:21:49 crc kubenswrapper[4687]: I1125 09:21:49.776859 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-29bts" event={"ID":"f2370488-8d98-49cc-acc8-52f5c5c77de7","Type":"ContainerStarted","Data":"c6cc9043f57d567909a27c21c9a2912dd7a17c84345eed0f410804b93f79c026"} Nov 25 09:21:49 crc kubenswrapper[4687]: I1125 09:21:49.780175 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-7x797" event={"ID":"aee1a7b3-633b-455a-903a-7b00ef90ea07","Type":"ContainerStarted","Data":"b4396e9cbe46737cfed524326f3719dd3a383b7923ac5134afa14be1ba6a5837"} Nov 25 09:21:49 crc kubenswrapper[4687]: I1125 09:21:49.783717 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5efa-account-create-fz9hn" event={"ID":"070421fb-5d0d-4088-bcae-f6d19fdc21fa","Type":"ContainerStarted","Data":"dc9be05bf5bd9171d7d16c604ca30f3d04ab51da4ab237d2c71eb9e331dfe63d"} Nov 25 09:21:49 crc kubenswrapper[4687]: I1125 09:21:49.783768 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5efa-account-create-fz9hn" 
event={"ID":"070421fb-5d0d-4088-bcae-f6d19fdc21fa","Type":"ContainerStarted","Data":"d033a09bfe250bc195f2d8ae029ad9595cf0eebda1b840a593c3c99bcde999a2"} Nov 25 09:21:49 crc kubenswrapper[4687]: I1125 09:21:49.788180 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-cnhz2" event={"ID":"4fb1585c-c72a-44ee-8614-3a84312c01ab","Type":"ContainerStarted","Data":"0bc179e62474513e3384bc30bb76a711049084865fff62813e5592e387828d3a"} Nov 25 09:21:49 crc kubenswrapper[4687]: I1125 09:21:49.788222 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-cnhz2" event={"ID":"4fb1585c-c72a-44ee-8614-3a84312c01ab","Type":"ContainerStarted","Data":"b66dc3dbc5c30c6ff4a188eaf2e4ec33c0b09bc786e8a78c05fa1d1168a375c5"} Nov 25 09:21:49 crc kubenswrapper[4687]: I1125 09:21:49.790267 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bd1e-account-create-5r2nk" event={"ID":"27fd9468-4e1f-4cc9-9711-6e378f4236ec","Type":"ContainerStarted","Data":"a5661e6bfb2ba652878c6d95ad076886b7fc77b53ea7d705fa26ab84c68df274"} Nov 25 09:21:49 crc kubenswrapper[4687]: I1125 09:21:49.791933 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-02d2-account-create-wz57w" event={"ID":"bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50","Type":"ContainerStarted","Data":"e738cd2d46d523e0e00d5db2f117a6ade1ee2b3bace62c8fd86753fa06fd8c00"} Nov 25 09:21:49 crc kubenswrapper[4687]: I1125 09:21:49.793199 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-rf7rz" event={"ID":"d3612478-e4e2-49bb-aecc-1eb23c44975b","Type":"ContainerStarted","Data":"36b614d18c67fd8b04aa78ad48600ee75c2fbbda3aa302f30b939308daca66a8"} Nov 25 09:21:49 crc kubenswrapper[4687]: I1125 09:21:49.793239 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-rf7rz" event={"ID":"d3612478-e4e2-49bb-aecc-1eb23c44975b","Type":"ContainerStarted","Data":"cd7d222d1b32f0287b4911896af7512cb08214a0ade5728c9d81976a549fc57e"} Nov 25 09:21:49 crc kubenswrapper[4687]: I1125 09:21:49.805790 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-7x797" podStartSLOduration=1.938906437 podStartE2EDuration="5.805767851s" podCreationTimestamp="2025-11-25 09:21:44 +0000 UTC" firstStartedPulling="2025-11-25 09:21:45.053999338 +0000 UTC m=+1100.107639056" lastFinishedPulling="2025-11-25 09:21:48.920860742 +0000 UTC m=+1103.974500470" observedRunningTime="2025-11-25 09:21:49.797966458 +0000 UTC m=+1104.851606176" watchObservedRunningTime="2025-11-25 09:21:49.805767851 +0000 UTC m=+1104.859407569" Nov 25 09:21:49 crc kubenswrapper[4687]: I1125 09:21:49.831438 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-02d2-account-create-wz57w" podStartSLOduration=2.831417601 podStartE2EDuration="2.831417601s" podCreationTimestamp="2025-11-25 09:21:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:21:49.826776895 +0000 UTC m=+1104.880416623" watchObservedRunningTime="2025-11-25 09:21:49.831417601 +0000 UTC m=+1104.885057319" Nov 25 09:21:49 crc kubenswrapper[4687]: I1125 09:21:49.850312 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-create-cnhz2" podStartSLOduration=3.850290786 podStartE2EDuration="3.850290786s" podCreationTimestamp="2025-11-25 09:21:46 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:21:49.8448894 +0000 UTC m=+1104.898529138" watchObservedRunningTime="2025-11-25 09:21:49.850290786 +0000 UTC m=+1104.903930504" Nov 25 09:21:49 crc kubenswrapper[4687]: I1125 09:21:49.859098 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-rf7rz" podStartSLOduration=3.859079257 podStartE2EDuration="3.859079257s" podCreationTimestamp="2025-11-25 09:21:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:21:49.857980807 +0000 UTC m=+1104.911620535" watchObservedRunningTime="2025-11-25 09:21:49.859079257 +0000 UTC m=+1104.912718985" Nov 25 09:21:49 crc kubenswrapper[4687]: I1125 09:21:49.873835 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-5efa-account-create-fz9hn" podStartSLOduration=3.873815729 podStartE2EDuration="3.873815729s" podCreationTimestamp="2025-11-25 09:21:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:21:49.872833482 +0000 UTC m=+1104.926473230" watchObservedRunningTime="2025-11-25 09:21:49.873815729 +0000 UTC m=+1104.927455447" Nov 25 09:21:50 crc kubenswrapper[4687]: I1125 09:21:50.803738 4687 generic.go:334] "Generic (PLEG): container finished" podID="bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50" containerID="cb1ee2042b781d7c9c11978d4b9103b12555c090fc9b4ee25daefca35d2e42d8" exitCode=0 Nov 25 09:21:50 crc kubenswrapper[4687]: I1125 09:21:50.803816 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-02d2-account-create-wz57w" event={"ID":"bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50","Type":"ContainerDied","Data":"cb1ee2042b781d7c9c11978d4b9103b12555c090fc9b4ee25daefca35d2e42d8"} Nov 25 09:21:50 crc kubenswrapper[4687]: I1125 09:21:50.807009 4687 generic.go:334] "Generic (PLEG): container finished" podID="d3612478-e4e2-49bb-aecc-1eb23c44975b" containerID="36b614d18c67fd8b04aa78ad48600ee75c2fbbda3aa302f30b939308daca66a8" exitCode=0 Nov 25 09:21:50 crc kubenswrapper[4687]: I1125 09:21:50.807068 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-rf7rz" event={"ID":"d3612478-e4e2-49bb-aecc-1eb23c44975b","Type":"ContainerDied","Data":"36b614d18c67fd8b04aa78ad48600ee75c2fbbda3aa302f30b939308daca66a8"} Nov 25 09:21:50 crc kubenswrapper[4687]: I1125 09:21:50.809351 4687 generic.go:334] "Generic (PLEG): container finished" podID="f2370488-8d98-49cc-acc8-52f5c5c77de7" containerID="6c51f725c9a9061eea651499f0e763af58356c240ea7d27336cc2c01cb025de2" exitCode=0 Nov 25 09:21:50 crc kubenswrapper[4687]: I1125 09:21:50.809421 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-29bts" event={"ID":"f2370488-8d98-49cc-acc8-52f5c5c77de7","Type":"ContainerDied","Data":"6c51f725c9a9061eea651499f0e763af58356c240ea7d27336cc2c01cb025de2"} Nov 25 09:21:50 crc kubenswrapper[4687]: I1125 09:21:50.811285 4687 generic.go:334] "Generic (PLEG): container finished" podID="070421fb-5d0d-4088-bcae-f6d19fdc21fa" containerID="dc9be05bf5bd9171d7d16c604ca30f3d04ab51da4ab237d2c71eb9e331dfe63d" exitCode=0 Nov 25 09:21:50 crc kubenswrapper[4687]: I1125 09:21:50.811317 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5efa-account-create-fz9hn" 
event={"ID":"070421fb-5d0d-4088-bcae-f6d19fdc21fa","Type":"ContainerDied","Data":"dc9be05bf5bd9171d7d16c604ca30f3d04ab51da4ab237d2c71eb9e331dfe63d"} Nov 25 09:21:50 crc kubenswrapper[4687]: I1125 09:21:50.812919 4687 generic.go:334] "Generic (PLEG): container finished" podID="4fb1585c-c72a-44ee-8614-3a84312c01ab" containerID="0bc179e62474513e3384bc30bb76a711049084865fff62813e5592e387828d3a" exitCode=0 Nov 25 09:21:50 crc kubenswrapper[4687]: I1125 09:21:50.812964 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-cnhz2" event={"ID":"4fb1585c-c72a-44ee-8614-3a84312c01ab","Type":"ContainerDied","Data":"0bc179e62474513e3384bc30bb76a711049084865fff62813e5592e387828d3a"} Nov 25 09:21:50 crc kubenswrapper[4687]: I1125 09:21:50.814384 4687 generic.go:334] "Generic (PLEG): container finished" podID="27fd9468-4e1f-4cc9-9711-6e378f4236ec" containerID="dbb65b8c4108d0bfc32e690df97534d5d0034a3ac627fb87d83f481d1e4f1ac6" exitCode=0 Nov 25 09:21:50 crc kubenswrapper[4687]: I1125 09:21:50.814530 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bd1e-account-create-5r2nk" event={"ID":"27fd9468-4e1f-4cc9-9711-6e378f4236ec","Type":"ContainerDied","Data":"dbb65b8c4108d0bfc32e690df97534d5d0034a3ac627fb87d83f481d1e4f1ac6"} Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.234359 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5efa-account-create-fz9hn" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.339467 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rbnjn\" (UniqueName: \"kubernetes.io/projected/070421fb-5d0d-4088-bcae-f6d19fdc21fa-kube-api-access-rbnjn\") pod \"070421fb-5d0d-4088-bcae-f6d19fdc21fa\" (UID: \"070421fb-5d0d-4088-bcae-f6d19fdc21fa\") " Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.339570 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/070421fb-5d0d-4088-bcae-f6d19fdc21fa-operator-scripts\") pod \"070421fb-5d0d-4088-bcae-f6d19fdc21fa\" (UID: \"070421fb-5d0d-4088-bcae-f6d19fdc21fa\") " Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.345651 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/070421fb-5d0d-4088-bcae-f6d19fdc21fa-kube-api-access-rbnjn" (OuterVolumeSpecName: "kube-api-access-rbnjn") pod "070421fb-5d0d-4088-bcae-f6d19fdc21fa" (UID: "070421fb-5d0d-4088-bcae-f6d19fdc21fa"). InnerVolumeSpecName "kube-api-access-rbnjn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.349963 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/070421fb-5d0d-4088-bcae-f6d19fdc21fa-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "070421fb-5d0d-4088-bcae-f6d19fdc21fa" (UID: "070421fb-5d0d-4088-bcae-f6d19fdc21fa"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.441550 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rbnjn\" (UniqueName: \"kubernetes.io/projected/070421fb-5d0d-4088-bcae-f6d19fdc21fa-kube-api-access-rbnjn\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.441582 4687 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/070421fb-5d0d-4088-bcae-f6d19fdc21fa-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.461013 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-02d2-account-create-wz57w" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.470494 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bd1e-account-create-5r2nk" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.479536 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-cnhz2" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.487071 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-rf7rz" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.502897 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-29bts" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.542274 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q8tb8\" (UniqueName: \"kubernetes.io/projected/27fd9468-4e1f-4cc9-9711-6e378f4236ec-kube-api-access-q8tb8\") pod \"27fd9468-4e1f-4cc9-9711-6e378f4236ec\" (UID: \"27fd9468-4e1f-4cc9-9711-6e378f4236ec\") " Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.542360 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27fd9468-4e1f-4cc9-9711-6e378f4236ec-operator-scripts\") pod \"27fd9468-4e1f-4cc9-9711-6e378f4236ec\" (UID: \"27fd9468-4e1f-4cc9-9711-6e378f4236ec\") " Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.542381 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pdvt4\" (UniqueName: \"kubernetes.io/projected/bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50-kube-api-access-pdvt4\") pod \"bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50\" (UID: \"bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50\") " Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.542411 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50-operator-scripts\") pod \"bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50\" (UID: \"bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50\") " Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.543313 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50" (UID: "bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.544137 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27fd9468-4e1f-4cc9-9711-6e378f4236ec-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "27fd9468-4e1f-4cc9-9711-6e378f4236ec" (UID: "27fd9468-4e1f-4cc9-9711-6e378f4236ec"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.547769 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27fd9468-4e1f-4cc9-9711-6e378f4236ec-kube-api-access-q8tb8" (OuterVolumeSpecName: "kube-api-access-q8tb8") pod "27fd9468-4e1f-4cc9-9711-6e378f4236ec" (UID: "27fd9468-4e1f-4cc9-9711-6e378f4236ec"). InnerVolumeSpecName "kube-api-access-q8tb8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.547837 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50-kube-api-access-pdvt4" (OuterVolumeSpecName: "kube-api-access-pdvt4") pod "bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50" (UID: "bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50"). InnerVolumeSpecName "kube-api-access-pdvt4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.643486 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mpfv4\" (UniqueName: \"kubernetes.io/projected/d3612478-e4e2-49bb-aecc-1eb23c44975b-kube-api-access-mpfv4\") pod \"d3612478-e4e2-49bb-aecc-1eb23c44975b\" (UID: \"d3612478-e4e2-49bb-aecc-1eb23c44975b\") " Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.643653 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3612478-e4e2-49bb-aecc-1eb23c44975b-operator-scripts\") pod \"d3612478-e4e2-49bb-aecc-1eb23c44975b\" (UID: \"d3612478-e4e2-49bb-aecc-1eb23c44975b\") " Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.643725 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4fb1585c-c72a-44ee-8614-3a84312c01ab-operator-scripts\") pod \"4fb1585c-c72a-44ee-8614-3a84312c01ab\" (UID: \"4fb1585c-c72a-44ee-8614-3a84312c01ab\") " Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.643766 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2370488-8d98-49cc-acc8-52f5c5c77de7-operator-scripts\") pod \"f2370488-8d98-49cc-acc8-52f5c5c77de7\" (UID: \"f2370488-8d98-49cc-acc8-52f5c5c77de7\") " Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.643782 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7qq57\" (UniqueName: \"kubernetes.io/projected/f2370488-8d98-49cc-acc8-52f5c5c77de7-kube-api-access-7qq57\") pod \"f2370488-8d98-49cc-acc8-52f5c5c77de7\" (UID: \"f2370488-8d98-49cc-acc8-52f5c5c77de7\") " Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.643839 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v5sfn\" (UniqueName: \"kubernetes.io/projected/4fb1585c-c72a-44ee-8614-3a84312c01ab-kube-api-access-v5sfn\") pod 
\"4fb1585c-c72a-44ee-8614-3a84312c01ab\" (UID: \"4fb1585c-c72a-44ee-8614-3a84312c01ab\") " Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.644172 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2370488-8d98-49cc-acc8-52f5c5c77de7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f2370488-8d98-49cc-acc8-52f5c5c77de7" (UID: "f2370488-8d98-49cc-acc8-52f5c5c77de7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.644236 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q8tb8\" (UniqueName: \"kubernetes.io/projected/27fd9468-4e1f-4cc9-9711-6e378f4236ec-kube-api-access-q8tb8\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.644249 4687 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27fd9468-4e1f-4cc9-9711-6e378f4236ec-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.644258 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pdvt4\" (UniqueName: \"kubernetes.io/projected/bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50-kube-api-access-pdvt4\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.644267 4687 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.644251 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3612478-e4e2-49bb-aecc-1eb23c44975b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d3612478-e4e2-49bb-aecc-1eb23c44975b" (UID: "d3612478-e4e2-49bb-aecc-1eb23c44975b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.644687 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fb1585c-c72a-44ee-8614-3a84312c01ab-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4fb1585c-c72a-44ee-8614-3a84312c01ab" (UID: "4fb1585c-c72a-44ee-8614-3a84312c01ab"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.646354 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2370488-8d98-49cc-acc8-52f5c5c77de7-kube-api-access-7qq57" (OuterVolumeSpecName: "kube-api-access-7qq57") pod "f2370488-8d98-49cc-acc8-52f5c5c77de7" (UID: "f2370488-8d98-49cc-acc8-52f5c5c77de7"). InnerVolumeSpecName "kube-api-access-7qq57". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.646885 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3612478-e4e2-49bb-aecc-1eb23c44975b-kube-api-access-mpfv4" (OuterVolumeSpecName: "kube-api-access-mpfv4") pod "d3612478-e4e2-49bb-aecc-1eb23c44975b" (UID: "d3612478-e4e2-49bb-aecc-1eb23c44975b"). InnerVolumeSpecName "kube-api-access-mpfv4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.647319 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fb1585c-c72a-44ee-8614-3a84312c01ab-kube-api-access-v5sfn" (OuterVolumeSpecName: "kube-api-access-v5sfn") pod "4fb1585c-c72a-44ee-8614-3a84312c01ab" (UID: "4fb1585c-c72a-44ee-8614-3a84312c01ab"). InnerVolumeSpecName "kube-api-access-v5sfn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.746464 4687 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2370488-8d98-49cc-acc8-52f5c5c77de7-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.746522 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7qq57\" (UniqueName: \"kubernetes.io/projected/f2370488-8d98-49cc-acc8-52f5c5c77de7-kube-api-access-7qq57\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.746540 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v5sfn\" (UniqueName: \"kubernetes.io/projected/4fb1585c-c72a-44ee-8614-3a84312c01ab-kube-api-access-v5sfn\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.746553 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mpfv4\" (UniqueName: \"kubernetes.io/projected/d3612478-e4e2-49bb-aecc-1eb23c44975b-kube-api-access-mpfv4\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.746567 4687 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3612478-e4e2-49bb-aecc-1eb23c44975b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.746579 4687 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4fb1585c-c72a-44ee-8614-3a84312c01ab-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.838619 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bd1e-account-create-5r2nk" event={"ID":"27fd9468-4e1f-4cc9-9711-6e378f4236ec","Type":"ContainerDied","Data":"a5661e6bfb2ba652878c6d95ad076886b7fc77b53ea7d705fa26ab84c68df274"} Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.838658 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a5661e6bfb2ba652878c6d95ad076886b7fc77b53ea7d705fa26ab84c68df274" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.838690 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bd1e-account-create-5r2nk" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.841456 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-02d2-account-create-wz57w" event={"ID":"bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50","Type":"ContainerDied","Data":"e738cd2d46d523e0e00d5db2f117a6ade1ee2b3bace62c8fd86753fa06fd8c00"} Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.841485 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e738cd2d46d523e0e00d5db2f117a6ade1ee2b3bace62c8fd86753fa06fd8c00" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.841581 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-02d2-account-create-wz57w" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.843665 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-rf7rz" event={"ID":"d3612478-e4e2-49bb-aecc-1eb23c44975b","Type":"ContainerDied","Data":"cd7d222d1b32f0287b4911896af7512cb08214a0ade5728c9d81976a549fc57e"} Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.843707 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cd7d222d1b32f0287b4911896af7512cb08214a0ade5728c9d81976a549fc57e" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.843714 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-rf7rz" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.847761 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-29bts" event={"ID":"f2370488-8d98-49cc-acc8-52f5c5c77de7","Type":"ContainerDied","Data":"c6cc9043f57d567909a27c21c9a2912dd7a17c84345eed0f410804b93f79c026"} Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.847842 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c6cc9043f57d567909a27c21c9a2912dd7a17c84345eed0f410804b93f79c026" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.847782 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-29bts" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.853669 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5efa-account-create-fz9hn" event={"ID":"070421fb-5d0d-4088-bcae-f6d19fdc21fa","Type":"ContainerDied","Data":"d033a09bfe250bc195f2d8ae029ad9595cf0eebda1b840a593c3c99bcde999a2"} Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.853710 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d033a09bfe250bc195f2d8ae029ad9595cf0eebda1b840a593c3c99bcde999a2" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.853685 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5efa-account-create-fz9hn" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.855613 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-cnhz2" event={"ID":"4fb1585c-c72a-44ee-8614-3a84312c01ab","Type":"ContainerDied","Data":"b66dc3dbc5c30c6ff4a188eaf2e4ec33c0b09bc786e8a78c05fa1d1168a375c5"} Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.855650 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-cnhz2" Nov 25 09:21:52 crc kubenswrapper[4687]: I1125 09:21:52.855652 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b66dc3dbc5c30c6ff4a188eaf2e4ec33c0b09bc786e8a78c05fa1d1168a375c5" Nov 25 09:21:54 crc kubenswrapper[4687]: I1125 09:21:54.314619 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" Nov 25 09:21:54 crc kubenswrapper[4687]: I1125 09:21:54.392034 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-mtcxf"] Nov 25 09:21:54 crc kubenswrapper[4687]: I1125 09:21:54.392288 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" podUID="a4c3e61a-c02f-410d-aed1-76fe207f46c5" containerName="dnsmasq-dns" containerID="cri-o://72e101fbea4a41685c54343fc39a3ac98140c32b47c5cb593e9350445bf15f9b" gracePeriod=10 Nov 25 09:21:54 crc kubenswrapper[4687]: I1125 09:21:54.831228 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" Nov 25 09:21:54 crc kubenswrapper[4687]: I1125 09:21:54.871987 4687 generic.go:334] "Generic (PLEG): container finished" podID="a4c3e61a-c02f-410d-aed1-76fe207f46c5" containerID="72e101fbea4a41685c54343fc39a3ac98140c32b47c5cb593e9350445bf15f9b" exitCode=0 Nov 25 09:21:54 crc kubenswrapper[4687]: I1125 09:21:54.872024 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" event={"ID":"a4c3e61a-c02f-410d-aed1-76fe207f46c5","Type":"ContainerDied","Data":"72e101fbea4a41685c54343fc39a3ac98140c32b47c5cb593e9350445bf15f9b"} Nov 25 09:21:54 crc kubenswrapper[4687]: I1125 09:21:54.872049 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" event={"ID":"a4c3e61a-c02f-410d-aed1-76fe207f46c5","Type":"ContainerDied","Data":"b89abe4e8fb0f5c12517cea4eb2a81aeb10246cc0d8d2b169564e976fe428735"} Nov 25 09:21:54 crc kubenswrapper[4687]: I1125 09:21:54.872064 4687 scope.go:117] "RemoveContainer" containerID="72e101fbea4a41685c54343fc39a3ac98140c32b47c5cb593e9350445bf15f9b" Nov 25 09:21:54 crc kubenswrapper[4687]: I1125 09:21:54.872069 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-mtcxf" Nov 25 09:21:54 crc kubenswrapper[4687]: I1125 09:21:54.906119 4687 scope.go:117] "RemoveContainer" containerID="0571b877df18990458d15fc958af49185e7d285e18ec7e015a893399c3c17b6b" Nov 25 09:21:54 crc kubenswrapper[4687]: I1125 09:21:54.928269 4687 scope.go:117] "RemoveContainer" containerID="72e101fbea4a41685c54343fc39a3ac98140c32b47c5cb593e9350445bf15f9b" Nov 25 09:21:54 crc kubenswrapper[4687]: E1125 09:21:54.929022 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72e101fbea4a41685c54343fc39a3ac98140c32b47c5cb593e9350445bf15f9b\": container with ID starting with 72e101fbea4a41685c54343fc39a3ac98140c32b47c5cb593e9350445bf15f9b not found: ID does not exist" containerID="72e101fbea4a41685c54343fc39a3ac98140c32b47c5cb593e9350445bf15f9b" Nov 25 09:21:54 crc kubenswrapper[4687]: I1125 09:21:54.929051 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72e101fbea4a41685c54343fc39a3ac98140c32b47c5cb593e9350445bf15f9b"} err="failed to get container status \"72e101fbea4a41685c54343fc39a3ac98140c32b47c5cb593e9350445bf15f9b\": rpc error: code = NotFound desc = could not find container \"72e101fbea4a41685c54343fc39a3ac98140c32b47c5cb593e9350445bf15f9b\": container with ID starting with 72e101fbea4a41685c54343fc39a3ac98140c32b47c5cb593e9350445bf15f9b not found: ID does not exist" Nov 25 09:21:54 crc kubenswrapper[4687]: I1125 09:21:54.929072 4687 scope.go:117] "RemoveContainer" containerID="0571b877df18990458d15fc958af49185e7d285e18ec7e015a893399c3c17b6b" Nov 25 09:21:54 crc kubenswrapper[4687]: E1125 09:21:54.929364 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0571b877df18990458d15fc958af49185e7d285e18ec7e015a893399c3c17b6b\": container with ID starting with 0571b877df18990458d15fc958af49185e7d285e18ec7e015a893399c3c17b6b not found: ID does not exist" containerID="0571b877df18990458d15fc958af49185e7d285e18ec7e015a893399c3c17b6b" Nov 25 09:21:54 crc kubenswrapper[4687]: I1125 09:21:54.929422 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0571b877df18990458d15fc958af49185e7d285e18ec7e015a893399c3c17b6b"} err="failed to get container status \"0571b877df18990458d15fc958af49185e7d285e18ec7e015a893399c3c17b6b\": rpc error: code = NotFound desc = could not find container \"0571b877df18990458d15fc958af49185e7d285e18ec7e015a893399c3c17b6b\": container with ID starting with 0571b877df18990458d15fc958af49185e7d285e18ec7e015a893399c3c17b6b not found: ID does not exist" Nov 25 09:21:54 crc kubenswrapper[4687]: I1125 09:21:54.981304 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4c3e61a-c02f-410d-aed1-76fe207f46c5-config\") pod \"a4c3e61a-c02f-410d-aed1-76fe207f46c5\" (UID: \"a4c3e61a-c02f-410d-aed1-76fe207f46c5\") " Nov 25 09:21:54 crc kubenswrapper[4687]: I1125 09:21:54.981365 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-spz9h\" (UniqueName: \"kubernetes.io/projected/a4c3e61a-c02f-410d-aed1-76fe207f46c5-kube-api-access-spz9h\") pod \"a4c3e61a-c02f-410d-aed1-76fe207f46c5\" (UID: \"a4c3e61a-c02f-410d-aed1-76fe207f46c5\") " Nov 25 09:21:54 crc kubenswrapper[4687]: I1125 09:21:54.981422 4687 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a4c3e61a-c02f-410d-aed1-76fe207f46c5-dns-svc\") pod \"a4c3e61a-c02f-410d-aed1-76fe207f46c5\" (UID: \"a4c3e61a-c02f-410d-aed1-76fe207f46c5\") " Nov 25 09:21:54 crc kubenswrapper[4687]: I1125 09:21:54.993712 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4c3e61a-c02f-410d-aed1-76fe207f46c5-kube-api-access-spz9h" (OuterVolumeSpecName: "kube-api-access-spz9h") pod "a4c3e61a-c02f-410d-aed1-76fe207f46c5" (UID: "a4c3e61a-c02f-410d-aed1-76fe207f46c5"). InnerVolumeSpecName "kube-api-access-spz9h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:21:55 crc kubenswrapper[4687]: I1125 09:21:55.056360 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4c3e61a-c02f-410d-aed1-76fe207f46c5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a4c3e61a-c02f-410d-aed1-76fe207f46c5" (UID: "a4c3e61a-c02f-410d-aed1-76fe207f46c5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:55 crc kubenswrapper[4687]: I1125 09:21:55.065190 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4c3e61a-c02f-410d-aed1-76fe207f46c5-config" (OuterVolumeSpecName: "config") pod "a4c3e61a-c02f-410d-aed1-76fe207f46c5" (UID: "a4c3e61a-c02f-410d-aed1-76fe207f46c5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:55 crc kubenswrapper[4687]: I1125 09:21:55.083569 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4c3e61a-c02f-410d-aed1-76fe207f46c5-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:55 crc kubenswrapper[4687]: I1125 09:21:55.083598 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-spz9h\" (UniqueName: \"kubernetes.io/projected/a4c3e61a-c02f-410d-aed1-76fe207f46c5-kube-api-access-spz9h\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:55 crc kubenswrapper[4687]: I1125 09:21:55.083610 4687 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a4c3e61a-c02f-410d-aed1-76fe207f46c5-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:55 crc kubenswrapper[4687]: I1125 09:21:55.210188 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-mtcxf"] Nov 25 09:21:55 crc kubenswrapper[4687]: I1125 09:21:55.220337 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-mtcxf"] Nov 25 09:21:55 crc kubenswrapper[4687]: I1125 09:21:55.754373 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4c3e61a-c02f-410d-aed1-76fe207f46c5" path="/var/lib/kubelet/pods/a4c3e61a-c02f-410d-aed1-76fe207f46c5/volumes" Nov 25 09:21:55 crc kubenswrapper[4687]: I1125 09:21:55.897236 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-etc-swift\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") " pod="openstack/swift-storage-0" Nov 25 09:21:55 crc kubenswrapper[4687]: E1125 09:21:55.897818 4687 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 09:21:55 crc kubenswrapper[4687]: E1125 09:21:55.898695 4687 projected.go:194] Error preparing data for 
projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 09:21:55 crc kubenswrapper[4687]: E1125 09:21:55.898761 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-etc-swift podName:1f6a5d97-063d-47e3-b049-fd2b9b46ee77 nodeName:}" failed. No retries permitted until 2025-11-25 09:22:11.898743304 +0000 UTC m=+1126.952383022 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-etc-swift") pod "swift-storage-0" (UID: "1f6a5d97-063d-47e3-b049-fd2b9b46ee77") : configmap "swift-ring-files" not found Nov 25 09:21:56 crc kubenswrapper[4687]: I1125 09:21:56.892465 4687 generic.go:334] "Generic (PLEG): container finished" podID="aee1a7b3-633b-455a-903a-7b00ef90ea07" containerID="b4396e9cbe46737cfed524326f3719dd3a383b7923ac5134afa14be1ba6a5837" exitCode=0 Nov 25 09:21:56 crc kubenswrapper[4687]: I1125 09:21:56.892551 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-7x797" event={"ID":"aee1a7b3-633b-455a-903a-7b00ef90ea07","Type":"ContainerDied","Data":"b4396e9cbe46737cfed524326f3719dd3a383b7923ac5134afa14be1ba6a5837"} Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.478358 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-2jfzx"] Nov 25 09:21:57 crc kubenswrapper[4687]: E1125 09:21:57.478783 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb1585c-c72a-44ee-8614-3a84312c01ab" containerName="mariadb-database-create" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.478805 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb1585c-c72a-44ee-8614-3a84312c01ab" containerName="mariadb-database-create" Nov 25 09:21:57 crc kubenswrapper[4687]: E1125 09:21:57.478817 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="070421fb-5d0d-4088-bcae-f6d19fdc21fa" containerName="mariadb-account-create" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.478825 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="070421fb-5d0d-4088-bcae-f6d19fdc21fa" containerName="mariadb-account-create" Nov 25 09:21:57 crc kubenswrapper[4687]: E1125 09:21:57.478841 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4c3e61a-c02f-410d-aed1-76fe207f46c5" containerName="dnsmasq-dns" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.478849 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4c3e61a-c02f-410d-aed1-76fe207f46c5" containerName="dnsmasq-dns" Nov 25 09:21:57 crc kubenswrapper[4687]: E1125 09:21:57.478862 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3612478-e4e2-49bb-aecc-1eb23c44975b" containerName="mariadb-database-create" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.478871 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3612478-e4e2-49bb-aecc-1eb23c44975b" containerName="mariadb-database-create" Nov 25 09:21:57 crc kubenswrapper[4687]: E1125 09:21:57.478888 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50" containerName="mariadb-account-create" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.478896 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50" containerName="mariadb-account-create" Nov 25 09:21:57 crc kubenswrapper[4687]: E1125 09:21:57.478906 4687 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4c3e61a-c02f-410d-aed1-76fe207f46c5" containerName="init" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.478913 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4c3e61a-c02f-410d-aed1-76fe207f46c5" containerName="init" Nov 25 09:21:57 crc kubenswrapper[4687]: E1125 09:21:57.478936 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2370488-8d98-49cc-acc8-52f5c5c77de7" containerName="mariadb-database-create" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.478943 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2370488-8d98-49cc-acc8-52f5c5c77de7" containerName="mariadb-database-create" Nov 25 09:21:57 crc kubenswrapper[4687]: E1125 09:21:57.478957 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27fd9468-4e1f-4cc9-9711-6e378f4236ec" containerName="mariadb-account-create" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.478964 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="27fd9468-4e1f-4cc9-9711-6e378f4236ec" containerName="mariadb-account-create" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.479183 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50" containerName="mariadb-account-create" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.479200 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2370488-8d98-49cc-acc8-52f5c5c77de7" containerName="mariadb-database-create" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.479215 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="070421fb-5d0d-4088-bcae-f6d19fdc21fa" containerName="mariadb-account-create" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.479224 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb1585c-c72a-44ee-8614-3a84312c01ab" containerName="mariadb-database-create" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.479237 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3612478-e4e2-49bb-aecc-1eb23c44975b" containerName="mariadb-database-create" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.479248 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4c3e61a-c02f-410d-aed1-76fe207f46c5" containerName="dnsmasq-dns" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.479259 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="27fd9468-4e1f-4cc9-9711-6e378f4236ec" containerName="mariadb-account-create" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.480070 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-2jfzx" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.481950 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.483417 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-rggzb" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.495904 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-2jfzx"] Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.623011 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pl6nc\" (UniqueName: \"kubernetes.io/projected/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-kube-api-access-pl6nc\") pod \"glance-db-sync-2jfzx\" (UID: \"c723bbb1-d23f-4a7a-9e52-ba3279dd969b\") " pod="openstack/glance-db-sync-2jfzx" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.623090 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-config-data\") pod \"glance-db-sync-2jfzx\" (UID: \"c723bbb1-d23f-4a7a-9e52-ba3279dd969b\") " pod="openstack/glance-db-sync-2jfzx" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.623279 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-combined-ca-bundle\") pod \"glance-db-sync-2jfzx\" (UID: \"c723bbb1-d23f-4a7a-9e52-ba3279dd969b\") " pod="openstack/glance-db-sync-2jfzx" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.623337 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-db-sync-config-data\") pod \"glance-db-sync-2jfzx\" (UID: \"c723bbb1-d23f-4a7a-9e52-ba3279dd969b\") " pod="openstack/glance-db-sync-2jfzx" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.725064 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pl6nc\" (UniqueName: \"kubernetes.io/projected/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-kube-api-access-pl6nc\") pod \"glance-db-sync-2jfzx\" (UID: \"c723bbb1-d23f-4a7a-9e52-ba3279dd969b\") " pod="openstack/glance-db-sync-2jfzx" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.725157 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-config-data\") pod \"glance-db-sync-2jfzx\" (UID: \"c723bbb1-d23f-4a7a-9e52-ba3279dd969b\") " pod="openstack/glance-db-sync-2jfzx" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.725213 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-combined-ca-bundle\") pod \"glance-db-sync-2jfzx\" (UID: \"c723bbb1-d23f-4a7a-9e52-ba3279dd969b\") " pod="openstack/glance-db-sync-2jfzx" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.725250 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-db-sync-config-data\") pod 
\"glance-db-sync-2jfzx\" (UID: \"c723bbb1-d23f-4a7a-9e52-ba3279dd969b\") " pod="openstack/glance-db-sync-2jfzx" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.731818 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-combined-ca-bundle\") pod \"glance-db-sync-2jfzx\" (UID: \"c723bbb1-d23f-4a7a-9e52-ba3279dd969b\") " pod="openstack/glance-db-sync-2jfzx" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.733446 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-db-sync-config-data\") pod \"glance-db-sync-2jfzx\" (UID: \"c723bbb1-d23f-4a7a-9e52-ba3279dd969b\") " pod="openstack/glance-db-sync-2jfzx" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.738296 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-config-data\") pod \"glance-db-sync-2jfzx\" (UID: \"c723bbb1-d23f-4a7a-9e52-ba3279dd969b\") " pod="openstack/glance-db-sync-2jfzx" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.745203 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pl6nc\" (UniqueName: \"kubernetes.io/projected/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-kube-api-access-pl6nc\") pod \"glance-db-sync-2jfzx\" (UID: \"c723bbb1-d23f-4a7a-9e52-ba3279dd969b\") " pod="openstack/glance-db-sync-2jfzx" Nov 25 09:21:57 crc kubenswrapper[4687]: I1125 09:21:57.798111 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-2jfzx" Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.269072 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.335224 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2hxtn\" (UniqueName: \"kubernetes.io/projected/aee1a7b3-633b-455a-903a-7b00ef90ea07-kube-api-access-2hxtn\") pod \"aee1a7b3-633b-455a-903a-7b00ef90ea07\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.335348 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/aee1a7b3-633b-455a-903a-7b00ef90ea07-ring-data-devices\") pod \"aee1a7b3-633b-455a-903a-7b00ef90ea07\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.335432 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/aee1a7b3-633b-455a-903a-7b00ef90ea07-swiftconf\") pod \"aee1a7b3-633b-455a-903a-7b00ef90ea07\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.335462 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aee1a7b3-633b-455a-903a-7b00ef90ea07-combined-ca-bundle\") pod \"aee1a7b3-633b-455a-903a-7b00ef90ea07\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.335484 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/aee1a7b3-633b-455a-903a-7b00ef90ea07-etc-swift\") pod \"aee1a7b3-633b-455a-903a-7b00ef90ea07\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.335535 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/aee1a7b3-633b-455a-903a-7b00ef90ea07-scripts\") pod \"aee1a7b3-633b-455a-903a-7b00ef90ea07\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.335585 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/aee1a7b3-633b-455a-903a-7b00ef90ea07-dispersionconf\") pod \"aee1a7b3-633b-455a-903a-7b00ef90ea07\" (UID: \"aee1a7b3-633b-455a-903a-7b00ef90ea07\") " Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.336405 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aee1a7b3-633b-455a-903a-7b00ef90ea07-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "aee1a7b3-633b-455a-903a-7b00ef90ea07" (UID: "aee1a7b3-633b-455a-903a-7b00ef90ea07"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.336840 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aee1a7b3-633b-455a-903a-7b00ef90ea07-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "aee1a7b3-633b-455a-903a-7b00ef90ea07" (UID: "aee1a7b3-633b-455a-903a-7b00ef90ea07"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.343050 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aee1a7b3-633b-455a-903a-7b00ef90ea07-kube-api-access-2hxtn" (OuterVolumeSpecName: "kube-api-access-2hxtn") pod "aee1a7b3-633b-455a-903a-7b00ef90ea07" (UID: "aee1a7b3-633b-455a-903a-7b00ef90ea07"). InnerVolumeSpecName "kube-api-access-2hxtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.344264 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aee1a7b3-633b-455a-903a-7b00ef90ea07-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "aee1a7b3-633b-455a-903a-7b00ef90ea07" (UID: "aee1a7b3-633b-455a-903a-7b00ef90ea07"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.366014 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aee1a7b3-633b-455a-903a-7b00ef90ea07-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "aee1a7b3-633b-455a-903a-7b00ef90ea07" (UID: "aee1a7b3-633b-455a-903a-7b00ef90ea07"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.376705 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aee1a7b3-633b-455a-903a-7b00ef90ea07-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "aee1a7b3-633b-455a-903a-7b00ef90ea07" (UID: "aee1a7b3-633b-455a-903a-7b00ef90ea07"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.378254 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aee1a7b3-633b-455a-903a-7b00ef90ea07-scripts" (OuterVolumeSpecName: "scripts") pod "aee1a7b3-633b-455a-903a-7b00ef90ea07" (UID: "aee1a7b3-633b-455a-903a-7b00ef90ea07"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.384063 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-2jfzx"] Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.437261 4687 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/aee1a7b3-633b-455a-903a-7b00ef90ea07-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.437588 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aee1a7b3-633b-455a-903a-7b00ef90ea07-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.437599 4687 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/aee1a7b3-633b-455a-903a-7b00ef90ea07-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.437608 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/aee1a7b3-633b-455a-903a-7b00ef90ea07-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.437616 4687 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/aee1a7b3-633b-455a-903a-7b00ef90ea07-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.437624 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2hxtn\" (UniqueName: \"kubernetes.io/projected/aee1a7b3-633b-455a-903a-7b00ef90ea07-kube-api-access-2hxtn\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.437632 4687 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/aee1a7b3-633b-455a-903a-7b00ef90ea07-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.630977 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.912904 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-7x797" event={"ID":"aee1a7b3-633b-455a-903a-7b00ef90ea07","Type":"ContainerDied","Data":"e7b9f79e7e3e373e070fc7587d11db58a86e400cb8967fe4ee64c0a92b35ec71"} Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.912956 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e7b9f79e7e3e373e070fc7587d11db58a86e400cb8967fe4ee64c0a92b35ec71" Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.913020 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-7x797" Nov 25 09:21:58 crc kubenswrapper[4687]: I1125 09:21:58.916001 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-2jfzx" event={"ID":"c723bbb1-d23f-4a7a-9e52-ba3279dd969b","Type":"ContainerStarted","Data":"188748eecb9bab932a99e4da93978f0217c7cf0d6fc42c9fdf86612173dd2b9b"} Nov 25 09:22:02 crc kubenswrapper[4687]: I1125 09:22:02.700599 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-fdpzn" podUID="6894bad0-9f1e-4d44-89a3-b06c6b24495a" containerName="ovn-controller" probeResult="failure" output=< Nov 25 09:22:02 crc kubenswrapper[4687]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 25 09:22:02 crc kubenswrapper[4687]: > Nov 25 09:22:02 crc kubenswrapper[4687]: I1125 09:22:02.734533 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:22:04 crc kubenswrapper[4687]: I1125 09:22:04.965825 4687 generic.go:334] "Generic (PLEG): container finished" podID="abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" containerID="c595258a961b41004a924e3c4310ee9e02391fc1013460452a08f5fb21a05e95" exitCode=0 Nov 25 09:22:04 crc kubenswrapper[4687]: I1125 09:22:04.966004 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7","Type":"ContainerDied","Data":"c595258a961b41004a924e3c4310ee9e02391fc1013460452a08f5fb21a05e95"} Nov 25 09:22:04 crc kubenswrapper[4687]: I1125 09:22:04.971226 4687 generic.go:334] "Generic (PLEG): container finished" podID="9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" containerID="9916f15a8da56f333d7b7d285a6679cc365924b75d455e2c7dae8ee65095fcab" exitCode=0 Nov 25 09:22:04 crc kubenswrapper[4687]: I1125 09:22:04.972719 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee","Type":"ContainerDied","Data":"9916f15a8da56f333d7b7d285a6679cc365924b75d455e2c7dae8ee65095fcab"} Nov 25 09:22:07 crc kubenswrapper[4687]: I1125 09:22:07.690437 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-fdpzn" podUID="6894bad0-9f1e-4d44-89a3-b06c6b24495a" containerName="ovn-controller" probeResult="failure" output=< Nov 25 09:22:07 crc kubenswrapper[4687]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 25 09:22:07 crc kubenswrapper[4687]: > Nov 25 09:22:07 crc kubenswrapper[4687]: I1125 09:22:07.710323 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-hstvx" Nov 25 09:22:07 crc kubenswrapper[4687]: I1125 09:22:07.918366 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-fdpzn-config-fvbb8"] Nov 25 09:22:07 crc kubenswrapper[4687]: E1125 09:22:07.918789 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aee1a7b3-633b-455a-903a-7b00ef90ea07" containerName="swift-ring-rebalance" Nov 25 09:22:07 crc kubenswrapper[4687]: I1125 09:22:07.918806 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="aee1a7b3-633b-455a-903a-7b00ef90ea07" containerName="swift-ring-rebalance" Nov 25 09:22:07 crc kubenswrapper[4687]: I1125 09:22:07.918952 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="aee1a7b3-633b-455a-903a-7b00ef90ea07" containerName="swift-ring-rebalance" Nov 25 09:22:07 crc kubenswrapper[4687]: 
I1125 09:22:07.919490 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:07 crc kubenswrapper[4687]: I1125 09:22:07.921084 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 25 09:22:07 crc kubenswrapper[4687]: I1125 09:22:07.962666 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-fdpzn-config-fvbb8"] Nov 25 09:22:08 crc kubenswrapper[4687]: I1125 09:22:08.024345 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-var-log-ovn\") pod \"ovn-controller-fdpzn-config-fvbb8\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:08 crc kubenswrapper[4687]: I1125 09:22:08.024687 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-scripts\") pod \"ovn-controller-fdpzn-config-fvbb8\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:08 crc kubenswrapper[4687]: I1125 09:22:08.024710 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlhx9\" (UniqueName: \"kubernetes.io/projected/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-kube-api-access-qlhx9\") pod \"ovn-controller-fdpzn-config-fvbb8\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:08 crc kubenswrapper[4687]: I1125 09:22:08.024733 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-additional-scripts\") pod \"ovn-controller-fdpzn-config-fvbb8\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:08 crc kubenswrapper[4687]: I1125 09:22:08.024755 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-var-run\") pod \"ovn-controller-fdpzn-config-fvbb8\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:08 crc kubenswrapper[4687]: I1125 09:22:08.024790 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-var-run-ovn\") pod \"ovn-controller-fdpzn-config-fvbb8\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:08 crc kubenswrapper[4687]: I1125 09:22:08.126773 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-scripts\") pod \"ovn-controller-fdpzn-config-fvbb8\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:08 crc kubenswrapper[4687]: I1125 09:22:08.126835 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlhx9\" (UniqueName: 
\"kubernetes.io/projected/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-kube-api-access-qlhx9\") pod \"ovn-controller-fdpzn-config-fvbb8\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:08 crc kubenswrapper[4687]: I1125 09:22:08.126874 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-additional-scripts\") pod \"ovn-controller-fdpzn-config-fvbb8\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:08 crc kubenswrapper[4687]: I1125 09:22:08.126915 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-var-run\") pod \"ovn-controller-fdpzn-config-fvbb8\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:08 crc kubenswrapper[4687]: I1125 09:22:08.126961 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-var-run-ovn\") pod \"ovn-controller-fdpzn-config-fvbb8\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:08 crc kubenswrapper[4687]: I1125 09:22:08.127017 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-var-log-ovn\") pod \"ovn-controller-fdpzn-config-fvbb8\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:08 crc kubenswrapper[4687]: I1125 09:22:08.127339 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-var-log-ovn\") pod \"ovn-controller-fdpzn-config-fvbb8\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:08 crc kubenswrapper[4687]: I1125 09:22:08.127399 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-var-run\") pod \"ovn-controller-fdpzn-config-fvbb8\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:08 crc kubenswrapper[4687]: I1125 09:22:08.127437 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-var-run-ovn\") pod \"ovn-controller-fdpzn-config-fvbb8\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:08 crc kubenswrapper[4687]: I1125 09:22:08.128184 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-additional-scripts\") pod \"ovn-controller-fdpzn-config-fvbb8\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:08 crc kubenswrapper[4687]: I1125 09:22:08.128751 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-scripts\") pod \"ovn-controller-fdpzn-config-fvbb8\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:08 crc kubenswrapper[4687]: I1125 09:22:08.167948 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlhx9\" (UniqueName: \"kubernetes.io/projected/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-kube-api-access-qlhx9\") pod \"ovn-controller-fdpzn-config-fvbb8\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:08 crc kubenswrapper[4687]: I1125 09:22:08.284266 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:10 crc kubenswrapper[4687]: I1125 09:22:10.675480 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-fdpzn-config-fvbb8"] Nov 25 09:22:11 crc kubenswrapper[4687]: I1125 09:22:11.021181 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fdpzn-config-fvbb8" event={"ID":"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5","Type":"ContainerStarted","Data":"ee0387d9f6cc69d2072f04e40da6eabc145cdf6bfd19f369d6044ac38a97b5a0"} Nov 25 09:22:11 crc kubenswrapper[4687]: I1125 09:22:11.022729 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7","Type":"ContainerStarted","Data":"32e97fd39f4e2ee10c2ab632c1828fb5f358d328844833af75706ed6838b9409"} Nov 25 09:22:11 crc kubenswrapper[4687]: I1125 09:22:11.022933 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:22:11 crc kubenswrapper[4687]: I1125 09:22:11.024305 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee","Type":"ContainerStarted","Data":"71e762c5d8e711770e4fd5b25b4c1dd54aa6f6da8ce4400c2e1b5875f3601c1b"} Nov 25 09:22:11 crc kubenswrapper[4687]: I1125 09:22:11.024700 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 25 09:22:11 crc kubenswrapper[4687]: I1125 09:22:11.050979 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=59.726735165 podStartE2EDuration="1m9.050962547s" podCreationTimestamp="2025-11-25 09:21:02 +0000 UTC" firstStartedPulling="2025-11-25 09:21:19.386176695 +0000 UTC m=+1074.439816413" lastFinishedPulling="2025-11-25 09:21:28.710404077 +0000 UTC m=+1083.764043795" observedRunningTime="2025-11-25 09:22:11.049722643 +0000 UTC m=+1126.103362361" watchObservedRunningTime="2025-11-25 09:22:11.050962547 +0000 UTC m=+1126.104602265" Nov 25 09:22:11 crc kubenswrapper[4687]: I1125 09:22:11.080563 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=57.888173755 podStartE2EDuration="1m9.080546905s" podCreationTimestamp="2025-11-25 09:21:02 +0000 UTC" firstStartedPulling="2025-11-25 09:21:19.60899333 +0000 UTC m=+1074.662633048" lastFinishedPulling="2025-11-25 09:21:30.80136648 +0000 UTC m=+1085.855006198" observedRunningTime="2025-11-25 09:22:11.076616737 +0000 UTC m=+1126.130256455" watchObservedRunningTime="2025-11-25 09:22:11.080546905 +0000 UTC m=+1126.134186623" Nov 25 09:22:11 crc kubenswrapper[4687]: I1125 09:22:11.910719 
4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-etc-swift\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") " pod="openstack/swift-storage-0" Nov 25 09:22:11 crc kubenswrapper[4687]: I1125 09:22:11.927709 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1f6a5d97-063d-47e3-b049-fd2b9b46ee77-etc-swift\") pod \"swift-storage-0\" (UID: \"1f6a5d97-063d-47e3-b049-fd2b9b46ee77\") " pod="openstack/swift-storage-0" Nov 25 09:22:12 crc kubenswrapper[4687]: I1125 09:22:12.035191 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-2jfzx" event={"ID":"c723bbb1-d23f-4a7a-9e52-ba3279dd969b","Type":"ContainerStarted","Data":"7270282671fe1dcb14bc4acada6c83d2dde70fd11596965d46b046a4e4e7b1b8"} Nov 25 09:22:12 crc kubenswrapper[4687]: I1125 09:22:12.038025 4687 generic.go:334] "Generic (PLEG): container finished" podID="5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5" containerID="008cef6255ef85911a6ffa5ef19ef4d3aef603586a8c0f4d6bd7a6b2ac9b0835" exitCode=0 Nov 25 09:22:12 crc kubenswrapper[4687]: I1125 09:22:12.038090 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fdpzn-config-fvbb8" event={"ID":"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5","Type":"ContainerDied","Data":"008cef6255ef85911a6ffa5ef19ef4d3aef603586a8c0f4d6bd7a6b2ac9b0835"} Nov 25 09:22:12 crc kubenswrapper[4687]: I1125 09:22:12.059351 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-2jfzx" podStartSLOduration=2.974312738 podStartE2EDuration="15.059329698s" podCreationTimestamp="2025-11-25 09:21:57 +0000 UTC" firstStartedPulling="2025-11-25 09:21:58.386767468 +0000 UTC m=+1113.440407196" lastFinishedPulling="2025-11-25 09:22:10.471784428 +0000 UTC m=+1125.525424156" observedRunningTime="2025-11-25 09:22:12.058855455 +0000 UTC m=+1127.112495183" watchObservedRunningTime="2025-11-25 09:22:12.059329698 +0000 UTC m=+1127.112969416" Nov 25 09:22:12 crc kubenswrapper[4687]: I1125 09:22:12.184173 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 25 09:22:12 crc kubenswrapper[4687]: I1125 09:22:12.696671 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-fdpzn" Nov 25 09:22:12 crc kubenswrapper[4687]: I1125 09:22:12.721826 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 25 09:22:13 crc kubenswrapper[4687]: I1125 09:22:13.046440 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f6a5d97-063d-47e3-b049-fd2b9b46ee77","Type":"ContainerStarted","Data":"faffa4d890dddcb0c27506ee6b25a0fea7723b5c1d12272f88c53370713be8d3"} Nov 25 09:22:13 crc kubenswrapper[4687]: I1125 09:22:13.326107 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:13 crc kubenswrapper[4687]: I1125 09:22:13.434196 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qlhx9\" (UniqueName: \"kubernetes.io/projected/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-kube-api-access-qlhx9\") pod \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " Nov 25 09:22:13 crc kubenswrapper[4687]: I1125 09:22:13.434324 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-var-log-ovn\") pod \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " Nov 25 09:22:13 crc kubenswrapper[4687]: I1125 09:22:13.434410 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5" (UID: "5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:22:13 crc kubenswrapper[4687]: I1125 09:22:13.434495 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-scripts\") pod \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " Nov 25 09:22:13 crc kubenswrapper[4687]: I1125 09:22:13.434598 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-var-run\") pod \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " Nov 25 09:22:13 crc kubenswrapper[4687]: I1125 09:22:13.434622 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-var-run-ovn\") pod \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " Nov 25 09:22:13 crc kubenswrapper[4687]: I1125 09:22:13.434667 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-additional-scripts\") pod \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\" (UID: \"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5\") " Nov 25 09:22:13 crc kubenswrapper[4687]: I1125 09:22:13.434765 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5" (UID: "5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:22:13 crc kubenswrapper[4687]: I1125 09:22:13.434742 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-var-run" (OuterVolumeSpecName: "var-run") pod "5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5" (UID: "5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:22:13 crc kubenswrapper[4687]: I1125 09:22:13.435065 4687 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:13 crc kubenswrapper[4687]: I1125 09:22:13.435090 4687 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-var-run\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:13 crc kubenswrapper[4687]: I1125 09:22:13.435103 4687 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:13 crc kubenswrapper[4687]: I1125 09:22:13.435471 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5" (UID: "5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:13 crc kubenswrapper[4687]: I1125 09:22:13.435630 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-scripts" (OuterVolumeSpecName: "scripts") pod "5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5" (UID: "5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:13 crc kubenswrapper[4687]: I1125 09:22:13.458754 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-kube-api-access-qlhx9" (OuterVolumeSpecName: "kube-api-access-qlhx9") pod "5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5" (UID: "5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5"). InnerVolumeSpecName "kube-api-access-qlhx9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:22:13 crc kubenswrapper[4687]: I1125 09:22:13.535969 4687 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:13 crc kubenswrapper[4687]: I1125 09:22:13.536000 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qlhx9\" (UniqueName: \"kubernetes.io/projected/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-kube-api-access-qlhx9\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:13 crc kubenswrapper[4687]: I1125 09:22:13.536013 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:14 crc kubenswrapper[4687]: I1125 09:22:14.055650 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fdpzn-config-fvbb8" event={"ID":"5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5","Type":"ContainerDied","Data":"ee0387d9f6cc69d2072f04e40da6eabc145cdf6bfd19f369d6044ac38a97b5a0"} Nov 25 09:22:14 crc kubenswrapper[4687]: I1125 09:22:14.055918 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ee0387d9f6cc69d2072f04e40da6eabc145cdf6bfd19f369d6044ac38a97b5a0" Nov 25 09:22:14 crc kubenswrapper[4687]: I1125 09:22:14.055689 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-fdpzn-config-fvbb8" Nov 25 09:22:14 crc kubenswrapper[4687]: I1125 09:22:14.444576 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-fdpzn-config-fvbb8"] Nov 25 09:22:14 crc kubenswrapper[4687]: I1125 09:22:14.453849 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-fdpzn-config-fvbb8"] Nov 25 09:22:15 crc kubenswrapper[4687]: I1125 09:22:15.066419 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f6a5d97-063d-47e3-b049-fd2b9b46ee77","Type":"ContainerStarted","Data":"d68fe1afe74ee1050ba4201e12ff81596788f8a0157fd599015203c4ace4bd21"} Nov 25 09:22:15 crc kubenswrapper[4687]: I1125 09:22:15.066779 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f6a5d97-063d-47e3-b049-fd2b9b46ee77","Type":"ContainerStarted","Data":"90467da0076c4dbbb88ba1f470bc9b3e17f655fa89be9e465cfa25e9c91d945e"} Nov 25 09:22:15 crc kubenswrapper[4687]: I1125 09:22:15.066796 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f6a5d97-063d-47e3-b049-fd2b9b46ee77","Type":"ContainerStarted","Data":"d5be824d50cf2e5f2df85dcf0520a2a8a6b8507ead63808044440f35cf23d3e4"} Nov 25 09:22:15 crc kubenswrapper[4687]: I1125 09:22:15.066807 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f6a5d97-063d-47e3-b049-fd2b9b46ee77","Type":"ContainerStarted","Data":"eb9562aa8c4ee3058f1c9379c60792e2459b9898ff125fbca3e78215729364d4"} Nov 25 09:22:15 crc kubenswrapper[4687]: I1125 09:22:15.750229 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5" path="/var/lib/kubelet/pods/5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5/volumes" Nov 25 09:22:16 crc kubenswrapper[4687]: I1125 09:22:16.078832 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"1f6a5d97-063d-47e3-b049-fd2b9b46ee77","Type":"ContainerStarted","Data":"987ce8e604ad5597f4c8b6135666ab3eb05fa772e3d914ac423c622f06050e31"} Nov 25 09:22:17 crc kubenswrapper[4687]: I1125 09:22:17.099107 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f6a5d97-063d-47e3-b049-fd2b9b46ee77","Type":"ContainerStarted","Data":"0207a415643bb05177f7f804354914a75fb5e26f9626dc63a336a95abe33db28"} Nov 25 09:22:17 crc kubenswrapper[4687]: I1125 09:22:17.099430 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f6a5d97-063d-47e3-b049-fd2b9b46ee77","Type":"ContainerStarted","Data":"cde12e1111934cdc706815fc7dd4ae983d5ba1c041ed6964eacc7f34526632d0"} Nov 25 09:22:17 crc kubenswrapper[4687]: I1125 09:22:17.099441 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f6a5d97-063d-47e3-b049-fd2b9b46ee77","Type":"ContainerStarted","Data":"ebc54301c0304691cf94120f738828b0e637da3a6acd995e5d42e1904e617065"} Nov 25 09:22:18 crc kubenswrapper[4687]: I1125 09:22:18.112246 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f6a5d97-063d-47e3-b049-fd2b9b46ee77","Type":"ContainerStarted","Data":"02c599df786d8d5c7fd97731c309638aa651f45d8cc9fa9f2394250b6fb17169"} Nov 25 09:22:18 crc kubenswrapper[4687]: I1125 09:22:18.113386 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f6a5d97-063d-47e3-b049-fd2b9b46ee77","Type":"ContainerStarted","Data":"f620d9599a7ff3b2beda7062cf6cff62cb36ee5a64506921f37689352bb74105"} Nov 25 09:22:18 crc kubenswrapper[4687]: I1125 09:22:18.113405 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f6a5d97-063d-47e3-b049-fd2b9b46ee77","Type":"ContainerStarted","Data":"c5facfea4f93ccddd4d27baba8d78265199cc76f9f4841e15c2cde8b7518c74f"} Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.139258 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f6a5d97-063d-47e3-b049-fd2b9b46ee77","Type":"ContainerStarted","Data":"e5138dc083770350b7ecd5a554841951d59cd8a6a372c7f7b0293f793890b292"} Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.139346 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f6a5d97-063d-47e3-b049-fd2b9b46ee77","Type":"ContainerStarted","Data":"1c4f51a2a25a0c2f0dd8df5a85e228f338d4a738ef5b6f6b3ec179baefbc2e57"} Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.139365 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f6a5d97-063d-47e3-b049-fd2b9b46ee77","Type":"ContainerStarted","Data":"d1e25293740d2e234b74fad52c0d49835e91b1b0051b6a6638978fed4563db52"} Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.139377 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f6a5d97-063d-47e3-b049-fd2b9b46ee77","Type":"ContainerStarted","Data":"c40c7697326d7b3cca291dbebe56dc519992536f00684ac1ccb48399146b5b73"} Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.146995 4687 generic.go:334] "Generic (PLEG): container finished" podID="c723bbb1-d23f-4a7a-9e52-ba3279dd969b" containerID="7270282671fe1dcb14bc4acada6c83d2dde70fd11596965d46b046a4e4e7b1b8" exitCode=0 Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.147064 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-db-sync-2jfzx" event={"ID":"c723bbb1-d23f-4a7a-9e52-ba3279dd969b","Type":"ContainerDied","Data":"7270282671fe1dcb14bc4acada6c83d2dde70fd11596965d46b046a4e4e7b1b8"} Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.192152 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=35.495316932 podStartE2EDuration="40.192112963s" podCreationTimestamp="2025-11-25 09:21:39 +0000 UTC" firstStartedPulling="2025-11-25 09:22:12.730282494 +0000 UTC m=+1127.783922212" lastFinishedPulling="2025-11-25 09:22:17.427078525 +0000 UTC m=+1132.480718243" observedRunningTime="2025-11-25 09:22:19.173873085 +0000 UTC m=+1134.227512823" watchObservedRunningTime="2025-11-25 09:22:19.192112963 +0000 UTC m=+1134.245752681" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.575370 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-xdshj"] Nov 25 09:22:19 crc kubenswrapper[4687]: E1125 09:22:19.576107 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5" containerName="ovn-config" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.576131 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5" containerName="ovn-config" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.576400 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c90b3af-7c8f-4ad0-a9ba-fffd15a668d5" containerName="ovn-config" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.577471 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.579351 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.588153 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-xdshj"] Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.633701 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-xdshj\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.633734 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-config\") pod \"dnsmasq-dns-5c79d794d7-xdshj\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.633760 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-xdshj\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.633811 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-dns-swift-storage-0\") pod 
\"dnsmasq-dns-5c79d794d7-xdshj\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.633867 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qljm\" (UniqueName: \"kubernetes.io/projected/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-kube-api-access-4qljm\") pod \"dnsmasq-dns-5c79d794d7-xdshj\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.633888 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-xdshj\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.734945 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-xdshj\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.735004 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-config\") pod \"dnsmasq-dns-5c79d794d7-xdshj\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.735044 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-xdshj\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.735129 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-xdshj\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.735205 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qljm\" (UniqueName: \"kubernetes.io/projected/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-kube-api-access-4qljm\") pod \"dnsmasq-dns-5c79d794d7-xdshj\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.735238 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-xdshj\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.735986 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-ovsdbserver-nb\") pod 
\"dnsmasq-dns-5c79d794d7-xdshj\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.736189 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-xdshj\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.736612 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-config\") pod \"dnsmasq-dns-5c79d794d7-xdshj\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.736720 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-xdshj\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.736962 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-xdshj\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.753896 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qljm\" (UniqueName: \"kubernetes.io/projected/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-kube-api-access-4qljm\") pod \"dnsmasq-dns-5c79d794d7-xdshj\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:19 crc kubenswrapper[4687]: I1125 09:22:19.913910 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:20 crc kubenswrapper[4687]: I1125 09:22:20.342120 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-xdshj"] Nov 25 09:22:20 crc kubenswrapper[4687]: W1125 09:22:20.374164 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod206cc7c7_5ea3_4300_8017_d6dc3a5c279e.slice/crio-f2167611cc8a3ef08d7e2fbc2c3427fc2c8ba9f98aa3da414f827e15cac2b562 WatchSource:0}: Error finding container f2167611cc8a3ef08d7e2fbc2c3427fc2c8ba9f98aa3da414f827e15cac2b562: Status 404 returned error can't find the container with id f2167611cc8a3ef08d7e2fbc2c3427fc2c8ba9f98aa3da414f827e15cac2b562 Nov 25 09:22:20 crc kubenswrapper[4687]: I1125 09:22:20.483071 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-2jfzx" Nov 25 09:22:20 crc kubenswrapper[4687]: I1125 09:22:20.651013 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-db-sync-config-data\") pod \"c723bbb1-d23f-4a7a-9e52-ba3279dd969b\" (UID: \"c723bbb1-d23f-4a7a-9e52-ba3279dd969b\") " Nov 25 09:22:20 crc kubenswrapper[4687]: I1125 09:22:20.651410 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-combined-ca-bundle\") pod \"c723bbb1-d23f-4a7a-9e52-ba3279dd969b\" (UID: \"c723bbb1-d23f-4a7a-9e52-ba3279dd969b\") " Nov 25 09:22:20 crc kubenswrapper[4687]: I1125 09:22:20.651548 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pl6nc\" (UniqueName: \"kubernetes.io/projected/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-kube-api-access-pl6nc\") pod \"c723bbb1-d23f-4a7a-9e52-ba3279dd969b\" (UID: \"c723bbb1-d23f-4a7a-9e52-ba3279dd969b\") " Nov 25 09:22:20 crc kubenswrapper[4687]: I1125 09:22:20.651589 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-config-data\") pod \"c723bbb1-d23f-4a7a-9e52-ba3279dd969b\" (UID: \"c723bbb1-d23f-4a7a-9e52-ba3279dd969b\") " Nov 25 09:22:20 crc kubenswrapper[4687]: I1125 09:22:20.655115 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "c723bbb1-d23f-4a7a-9e52-ba3279dd969b" (UID: "c723bbb1-d23f-4a7a-9e52-ba3279dd969b"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:22:20 crc kubenswrapper[4687]: I1125 09:22:20.659018 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-kube-api-access-pl6nc" (OuterVolumeSpecName: "kube-api-access-pl6nc") pod "c723bbb1-d23f-4a7a-9e52-ba3279dd969b" (UID: "c723bbb1-d23f-4a7a-9e52-ba3279dd969b"). InnerVolumeSpecName "kube-api-access-pl6nc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:22:20 crc kubenswrapper[4687]: I1125 09:22:20.675040 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c723bbb1-d23f-4a7a-9e52-ba3279dd969b" (UID: "c723bbb1-d23f-4a7a-9e52-ba3279dd969b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:22:20 crc kubenswrapper[4687]: I1125 09:22:20.712456 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-config-data" (OuterVolumeSpecName: "config-data") pod "c723bbb1-d23f-4a7a-9e52-ba3279dd969b" (UID: "c723bbb1-d23f-4a7a-9e52-ba3279dd969b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:22:20 crc kubenswrapper[4687]: I1125 09:22:20.753291 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pl6nc\" (UniqueName: \"kubernetes.io/projected/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-kube-api-access-pl6nc\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:20 crc kubenswrapper[4687]: I1125 09:22:20.753335 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:20 crc kubenswrapper[4687]: I1125 09:22:20.753351 4687 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:20 crc kubenswrapper[4687]: I1125 09:22:20.753363 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c723bbb1-d23f-4a7a-9e52-ba3279dd969b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.164311 4687 generic.go:334] "Generic (PLEG): container finished" podID="206cc7c7-5ea3-4300-8017-d6dc3a5c279e" containerID="9f335559b5ff5029358a9e0e948294970ccfe0b00d7a172f83737a45f431d48c" exitCode=0 Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.164411 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" event={"ID":"206cc7c7-5ea3-4300-8017-d6dc3a5c279e","Type":"ContainerDied","Data":"9f335559b5ff5029358a9e0e948294970ccfe0b00d7a172f83737a45f431d48c"} Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.164456 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" event={"ID":"206cc7c7-5ea3-4300-8017-d6dc3a5c279e","Type":"ContainerStarted","Data":"f2167611cc8a3ef08d7e2fbc2c3427fc2c8ba9f98aa3da414f827e15cac2b562"} Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.166307 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-2jfzx" event={"ID":"c723bbb1-d23f-4a7a-9e52-ba3279dd969b","Type":"ContainerDied","Data":"188748eecb9bab932a99e4da93978f0217c7cf0d6fc42c9fdf86612173dd2b9b"} Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.166340 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="188748eecb9bab932a99e4da93978f0217c7cf0d6fc42c9fdf86612173dd2b9b" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.166351 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-2jfzx" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.625603 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-xdshj"] Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.661059 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-qrhc5"] Nov 25 09:22:21 crc kubenswrapper[4687]: E1125 09:22:21.661394 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c723bbb1-d23f-4a7a-9e52-ba3279dd969b" containerName="glance-db-sync" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.661418 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="c723bbb1-d23f-4a7a-9e52-ba3279dd969b" containerName="glance-db-sync" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.661652 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="c723bbb1-d23f-4a7a-9e52-ba3279dd969b" containerName="glance-db-sync" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.675958 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-qrhc5"] Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.676063 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.685011 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-dns-svc\") pod \"dnsmasq-dns-5f59b8f679-qrhc5\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.685156 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5xzn\" (UniqueName: \"kubernetes.io/projected/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-kube-api-access-w5xzn\") pod \"dnsmasq-dns-5f59b8f679-qrhc5\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.685197 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-config\") pod \"dnsmasq-dns-5f59b8f679-qrhc5\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.685234 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-dns-swift-storage-0\") pod \"dnsmasq-dns-5f59b8f679-qrhc5\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.685311 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-ovsdbserver-sb\") pod \"dnsmasq-dns-5f59b8f679-qrhc5\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.685368 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" 
(UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-ovsdbserver-nb\") pod \"dnsmasq-dns-5f59b8f679-qrhc5\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.786773 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5xzn\" (UniqueName: \"kubernetes.io/projected/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-kube-api-access-w5xzn\") pod \"dnsmasq-dns-5f59b8f679-qrhc5\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.786836 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-config\") pod \"dnsmasq-dns-5f59b8f679-qrhc5\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.786859 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-dns-swift-storage-0\") pod \"dnsmasq-dns-5f59b8f679-qrhc5\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.786880 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-ovsdbserver-sb\") pod \"dnsmasq-dns-5f59b8f679-qrhc5\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.786903 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-ovsdbserver-nb\") pod \"dnsmasq-dns-5f59b8f679-qrhc5\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.786948 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-dns-svc\") pod \"dnsmasq-dns-5f59b8f679-qrhc5\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.787820 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-dns-svc\") pod \"dnsmasq-dns-5f59b8f679-qrhc5\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.788172 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-dns-swift-storage-0\") pod \"dnsmasq-dns-5f59b8f679-qrhc5\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.788329 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-ovsdbserver-sb\") pod \"dnsmasq-dns-5f59b8f679-qrhc5\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.788896 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-ovsdbserver-nb\") pod \"dnsmasq-dns-5f59b8f679-qrhc5\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.788994 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-config\") pod \"dnsmasq-dns-5f59b8f679-qrhc5\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.807694 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5xzn\" (UniqueName: \"kubernetes.io/projected/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-kube-api-access-w5xzn\") pod \"dnsmasq-dns-5f59b8f679-qrhc5\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:21 crc kubenswrapper[4687]: I1125 09:22:21.994158 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:22 crc kubenswrapper[4687]: I1125 09:22:22.187932 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" event={"ID":"206cc7c7-5ea3-4300-8017-d6dc3a5c279e","Type":"ContainerStarted","Data":"e6cd37fa4f8e65ec04f49baab6bc1244fe29b2cc9a33c0b2d0274916ebccdae2"} Nov 25 09:22:22 crc kubenswrapper[4687]: I1125 09:22:22.188136 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:22 crc kubenswrapper[4687]: I1125 09:22:22.225838 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" podStartSLOduration=3.22581534 podStartE2EDuration="3.22581534s" podCreationTimestamp="2025-11-25 09:22:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:22:22.221884653 +0000 UTC m=+1137.275524381" watchObservedRunningTime="2025-11-25 09:22:22.22581534 +0000 UTC m=+1137.279455058" Nov 25 09:22:22 crc kubenswrapper[4687]: I1125 09:22:22.536143 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-qrhc5"] Nov 25 09:22:22 crc kubenswrapper[4687]: W1125 09:22:22.551869 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5796267d_68c1_4f9f_bffe_0edaba3fa4d1.slice/crio-29d13f0485a553facd36014a37a22dcc44259819a18baa94bc3f9bffaa6e981a WatchSource:0}: Error finding container 29d13f0485a553facd36014a37a22dcc44259819a18baa94bc3f9bffaa6e981a: Status 404 returned error can't find the container with id 29d13f0485a553facd36014a37a22dcc44259819a18baa94bc3f9bffaa6e981a Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.196216 4687 generic.go:334] "Generic (PLEG): container finished" podID="5796267d-68c1-4f9f-bffe-0edaba3fa4d1" containerID="995b3ce25f38495565ac59d33ebf10d399b911a1013b1b0ff5158232384136b7" 
exitCode=0 Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.196655 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" podUID="206cc7c7-5ea3-4300-8017-d6dc3a5c279e" containerName="dnsmasq-dns" containerID="cri-o://e6cd37fa4f8e65ec04f49baab6bc1244fe29b2cc9a33c0b2d0274916ebccdae2" gracePeriod=10 Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.197527 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" event={"ID":"5796267d-68c1-4f9f-bffe-0edaba3fa4d1","Type":"ContainerDied","Data":"995b3ce25f38495565ac59d33ebf10d399b911a1013b1b0ff5158232384136b7"} Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.197559 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" event={"ID":"5796267d-68c1-4f9f-bffe-0edaba3fa4d1","Type":"ContainerStarted","Data":"29d13f0485a553facd36014a37a22dcc44259819a18baa94bc3f9bffaa6e981a"} Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.429718 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.627483 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.669300 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-twnk8"] Nov 25 09:22:23 crc kubenswrapper[4687]: E1125 09:22:23.669841 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="206cc7c7-5ea3-4300-8017-d6dc3a5c279e" containerName="init" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.669859 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="206cc7c7-5ea3-4300-8017-d6dc3a5c279e" containerName="init" Nov 25 09:22:23 crc kubenswrapper[4687]: E1125 09:22:23.669872 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="206cc7c7-5ea3-4300-8017-d6dc3a5c279e" containerName="dnsmasq-dns" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.669878 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="206cc7c7-5ea3-4300-8017-d6dc3a5c279e" containerName="dnsmasq-dns" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.670015 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="206cc7c7-5ea3-4300-8017-d6dc3a5c279e" containerName="dnsmasq-dns" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.670567 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-twnk8" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.686011 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-twnk8"] Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.719349 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-ovsdbserver-sb\") pod \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.719397 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-ovsdbserver-nb\") pod \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.719573 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-dns-svc\") pod \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.719613 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-config\") pod \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.719635 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4qljm\" (UniqueName: \"kubernetes.io/projected/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-kube-api-access-4qljm\") pod \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.719681 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-dns-swift-storage-0\") pod \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\" (UID: \"206cc7c7-5ea3-4300-8017-d6dc3a5c279e\") " Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.727070 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-kube-api-access-4qljm" (OuterVolumeSpecName: "kube-api-access-4qljm") pod "206cc7c7-5ea3-4300-8017-d6dc3a5c279e" (UID: "206cc7c7-5ea3-4300-8017-d6dc3a5c279e"). InnerVolumeSpecName "kube-api-access-4qljm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.764605 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.783247 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-config" (OuterVolumeSpecName: "config") pod "206cc7c7-5ea3-4300-8017-d6dc3a5c279e" (UID: "206cc7c7-5ea3-4300-8017-d6dc3a5c279e"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.808077 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "206cc7c7-5ea3-4300-8017-d6dc3a5c279e" (UID: "206cc7c7-5ea3-4300-8017-d6dc3a5c279e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.808145 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-e3fb-account-create-w7xhv"] Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.809406 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e3fb-account-create-w7xhv" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.814524 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.817635 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "206cc7c7-5ea3-4300-8017-d6dc3a5c279e" (UID: "206cc7c7-5ea3-4300-8017-d6dc3a5c279e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.821016 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10b1fc74-8906-4881-a45c-e812501c6f2f-operator-scripts\") pod \"barbican-db-create-twnk8\" (UID: \"10b1fc74-8906-4881-a45c-e812501c6f2f\") " pod="openstack/barbican-db-create-twnk8" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.821092 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xt4jn\" (UniqueName: \"kubernetes.io/projected/10b1fc74-8906-4881-a45c-e812501c6f2f-kube-api-access-xt4jn\") pod \"barbican-db-create-twnk8\" (UID: \"10b1fc74-8906-4881-a45c-e812501c6f2f\") " pod="openstack/barbican-db-create-twnk8" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.821152 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.821165 4687 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.821174 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.821182 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4qljm\" (UniqueName: \"kubernetes.io/projected/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-kube-api-access-4qljm\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.823634 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-ovsdbserver-sb" (OuterVolumeSpecName: 
"ovsdbserver-sb") pod "206cc7c7-5ea3-4300-8017-d6dc3a5c279e" (UID: "206cc7c7-5ea3-4300-8017-d6dc3a5c279e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.829708 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "206cc7c7-5ea3-4300-8017-d6dc3a5c279e" (UID: "206cc7c7-5ea3-4300-8017-d6dc3a5c279e"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.829783 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-e3fb-account-create-w7xhv"] Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.886054 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-86dxw"] Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.887048 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-86dxw" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.899882 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-86dxw"] Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.922431 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7fnk\" (UniqueName: \"kubernetes.io/projected/ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c-kube-api-access-w7fnk\") pod \"barbican-e3fb-account-create-w7xhv\" (UID: \"ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c\") " pod="openstack/barbican-e3fb-account-create-w7xhv" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.922516 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10b1fc74-8906-4881-a45c-e812501c6f2f-operator-scripts\") pod \"barbican-db-create-twnk8\" (UID: \"10b1fc74-8906-4881-a45c-e812501c6f2f\") " pod="openstack/barbican-db-create-twnk8" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.922621 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xt4jn\" (UniqueName: \"kubernetes.io/projected/10b1fc74-8906-4881-a45c-e812501c6f2f-kube-api-access-xt4jn\") pod \"barbican-db-create-twnk8\" (UID: \"10b1fc74-8906-4881-a45c-e812501c6f2f\") " pod="openstack/barbican-db-create-twnk8" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.922736 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c-operator-scripts\") pod \"barbican-e3fb-account-create-w7xhv\" (UID: \"ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c\") " pod="openstack/barbican-e3fb-account-create-w7xhv" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.922856 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.922883 4687 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/206cc7c7-5ea3-4300-8017-d6dc3a5c279e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 
09:22:23.924201 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10b1fc74-8906-4881-a45c-e812501c6f2f-operator-scripts\") pod \"barbican-db-create-twnk8\" (UID: \"10b1fc74-8906-4881-a45c-e812501c6f2f\") " pod="openstack/barbican-db-create-twnk8" Nov 25 09:22:23 crc kubenswrapper[4687]: I1125 09:22:23.946829 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xt4jn\" (UniqueName: \"kubernetes.io/projected/10b1fc74-8906-4881-a45c-e812501c6f2f-kube-api-access-xt4jn\") pod \"barbican-db-create-twnk8\" (UID: \"10b1fc74-8906-4881-a45c-e812501c6f2f\") " pod="openstack/barbican-db-create-twnk8" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.006931 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-twnk8" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.023962 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctnhn\" (UniqueName: \"kubernetes.io/projected/1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf-kube-api-access-ctnhn\") pod \"cinder-db-create-86dxw\" (UID: \"1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf\") " pod="openstack/cinder-db-create-86dxw" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.024047 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7fnk\" (UniqueName: \"kubernetes.io/projected/ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c-kube-api-access-w7fnk\") pod \"barbican-e3fb-account-create-w7xhv\" (UID: \"ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c\") " pod="openstack/barbican-e3fb-account-create-w7xhv" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.024380 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf-operator-scripts\") pod \"cinder-db-create-86dxw\" (UID: \"1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf\") " pod="openstack/cinder-db-create-86dxw" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.024440 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c-operator-scripts\") pod \"barbican-e3fb-account-create-w7xhv\" (UID: \"ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c\") " pod="openstack/barbican-e3fb-account-create-w7xhv" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.025110 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c-operator-scripts\") pod \"barbican-e3fb-account-create-w7xhv\" (UID: \"ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c\") " pod="openstack/barbican-e3fb-account-create-w7xhv" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.054110 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7fnk\" (UniqueName: \"kubernetes.io/projected/ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c-kube-api-access-w7fnk\") pod \"barbican-e3fb-account-create-w7xhv\" (UID: \"ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c\") " pod="openstack/barbican-e3fb-account-create-w7xhv" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.080620 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-gtv52"] Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.086567 4687 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-gtv52" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.112597 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-de73-account-create-p9ft5"] Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.114183 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-de73-account-create-p9ft5" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.116064 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.127862 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf-operator-scripts\") pod \"cinder-db-create-86dxw\" (UID: \"1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf\") " pod="openstack/cinder-db-create-86dxw" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.127926 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctnhn\" (UniqueName: \"kubernetes.io/projected/1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf-kube-api-access-ctnhn\") pod \"cinder-db-create-86dxw\" (UID: \"1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf\") " pod="openstack/cinder-db-create-86dxw" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.128620 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf-operator-scripts\") pod \"cinder-db-create-86dxw\" (UID: \"1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf\") " pod="openstack/cinder-db-create-86dxw" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.159184 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-gtv52"] Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.178733 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-de73-account-create-p9ft5"] Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.199925 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctnhn\" (UniqueName: \"kubernetes.io/projected/1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf-kube-api-access-ctnhn\") pod \"cinder-db-create-86dxw\" (UID: \"1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf\") " pod="openstack/cinder-db-create-86dxw" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.215635 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-86dxw" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.229395 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/95391d75-b60d-49a7-892d-4236d940363c-operator-scripts\") pod \"cinder-de73-account-create-p9ft5\" (UID: \"95391d75-b60d-49a7-892d-4236d940363c\") " pod="openstack/cinder-de73-account-create-p9ft5" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.229600 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-e3fb-account-create-w7xhv" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.229755 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4p2t6\" (UniqueName: \"kubernetes.io/projected/95391d75-b60d-49a7-892d-4236d940363c-kube-api-access-4p2t6\") pod \"cinder-de73-account-create-p9ft5\" (UID: \"95391d75-b60d-49a7-892d-4236d940363c\") " pod="openstack/cinder-de73-account-create-p9ft5" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.229778 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nnswn\" (UniqueName: \"kubernetes.io/projected/57674a17-12fc-4749-bcc9-19a92f1c5016-kube-api-access-nnswn\") pod \"neutron-db-create-gtv52\" (UID: \"57674a17-12fc-4749-bcc9-19a92f1c5016\") " pod="openstack/neutron-db-create-gtv52" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.229799 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57674a17-12fc-4749-bcc9-19a92f1c5016-operator-scripts\") pod \"neutron-db-create-gtv52\" (UID: \"57674a17-12fc-4749-bcc9-19a92f1c5016\") " pod="openstack/neutron-db-create-gtv52" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.231374 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-pstdr"] Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.232832 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-pstdr" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.240237 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-l777p" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.240444 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.240587 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.240713 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.259959 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-pstdr"] Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.270230 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" event={"ID":"5796267d-68c1-4f9f-bffe-0edaba3fa4d1","Type":"ContainerStarted","Data":"cb396f0172c75527c46056c0b6b6df0e78d69812ee665e53aef312be06664a72"} Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.270495 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.290034 4687 generic.go:334] "Generic (PLEG): container finished" podID="206cc7c7-5ea3-4300-8017-d6dc3a5c279e" containerID="e6cd37fa4f8e65ec04f49baab6bc1244fe29b2cc9a33c0b2d0274916ebccdae2" exitCode=0 Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.290083 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" event={"ID":"206cc7c7-5ea3-4300-8017-d6dc3a5c279e","Type":"ContainerDied","Data":"e6cd37fa4f8e65ec04f49baab6bc1244fe29b2cc9a33c0b2d0274916ebccdae2"} Nov 25 09:22:24 crc 
kubenswrapper[4687]: I1125 09:22:24.290100 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.290115 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-xdshj" event={"ID":"206cc7c7-5ea3-4300-8017-d6dc3a5c279e","Type":"ContainerDied","Data":"f2167611cc8a3ef08d7e2fbc2c3427fc2c8ba9f98aa3da414f827e15cac2b562"} Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.290137 4687 scope.go:117] "RemoveContainer" containerID="e6cd37fa4f8e65ec04f49baab6bc1244fe29b2cc9a33c0b2d0274916ebccdae2" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.308994 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-4573-account-create-chmkq"] Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.311985 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-4573-account-create-chmkq" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.317304 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.319875 4687 scope.go:117] "RemoveContainer" containerID="9f335559b5ff5029358a9e0e948294970ccfe0b00d7a172f83737a45f431d48c" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.330988 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66108854-2b2f-49c8-be7a-b80e336dc27b-combined-ca-bundle\") pod \"keystone-db-sync-pstdr\" (UID: \"66108854-2b2f-49c8-be7a-b80e336dc27b\") " pod="openstack/keystone-db-sync-pstdr" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.331060 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4p2t6\" (UniqueName: \"kubernetes.io/projected/95391d75-b60d-49a7-892d-4236d940363c-kube-api-access-4p2t6\") pod \"cinder-de73-account-create-p9ft5\" (UID: \"95391d75-b60d-49a7-892d-4236d940363c\") " pod="openstack/cinder-de73-account-create-p9ft5" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.331087 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nnswn\" (UniqueName: \"kubernetes.io/projected/57674a17-12fc-4749-bcc9-19a92f1c5016-kube-api-access-nnswn\") pod \"neutron-db-create-gtv52\" (UID: \"57674a17-12fc-4749-bcc9-19a92f1c5016\") " pod="openstack/neutron-db-create-gtv52" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.331117 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57674a17-12fc-4749-bcc9-19a92f1c5016-operator-scripts\") pod \"neutron-db-create-gtv52\" (UID: \"57674a17-12fc-4749-bcc9-19a92f1c5016\") " pod="openstack/neutron-db-create-gtv52" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.331162 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66108854-2b2f-49c8-be7a-b80e336dc27b-config-data\") pod \"keystone-db-sync-pstdr\" (UID: \"66108854-2b2f-49c8-be7a-b80e336dc27b\") " pod="openstack/keystone-db-sync-pstdr" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.331196 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/95391d75-b60d-49a7-892d-4236d940363c-operator-scripts\") pod \"cinder-de73-account-create-p9ft5\" (UID: \"95391d75-b60d-49a7-892d-4236d940363c\") " pod="openstack/cinder-de73-account-create-p9ft5" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.331250 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtrlj\" (UniqueName: \"kubernetes.io/projected/66108854-2b2f-49c8-be7a-b80e336dc27b-kube-api-access-gtrlj\") pod \"keystone-db-sync-pstdr\" (UID: \"66108854-2b2f-49c8-be7a-b80e336dc27b\") " pod="openstack/keystone-db-sync-pstdr" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.332170 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/95391d75-b60d-49a7-892d-4236d940363c-operator-scripts\") pod \"cinder-de73-account-create-p9ft5\" (UID: \"95391d75-b60d-49a7-892d-4236d940363c\") " pod="openstack/cinder-de73-account-create-p9ft5" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.332645 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57674a17-12fc-4749-bcc9-19a92f1c5016-operator-scripts\") pod \"neutron-db-create-gtv52\" (UID: \"57674a17-12fc-4749-bcc9-19a92f1c5016\") " pod="openstack/neutron-db-create-gtv52" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.336463 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-4573-account-create-chmkq"] Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.346980 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nnswn\" (UniqueName: \"kubernetes.io/projected/57674a17-12fc-4749-bcc9-19a92f1c5016-kube-api-access-nnswn\") pod \"neutron-db-create-gtv52\" (UID: \"57674a17-12fc-4749-bcc9-19a92f1c5016\") " pod="openstack/neutron-db-create-gtv52" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.347288 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" podStartSLOduration=3.3472770929999998 podStartE2EDuration="3.347277093s" podCreationTimestamp="2025-11-25 09:22:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:22:24.304990347 +0000 UTC m=+1139.358630065" watchObservedRunningTime="2025-11-25 09:22:24.347277093 +0000 UTC m=+1139.400916811" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.351473 4687 scope.go:117] "RemoveContainer" containerID="e6cd37fa4f8e65ec04f49baab6bc1244fe29b2cc9a33c0b2d0274916ebccdae2" Nov 25 09:22:24 crc kubenswrapper[4687]: E1125 09:22:24.352706 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e6cd37fa4f8e65ec04f49baab6bc1244fe29b2cc9a33c0b2d0274916ebccdae2\": container with ID starting with e6cd37fa4f8e65ec04f49baab6bc1244fe29b2cc9a33c0b2d0274916ebccdae2 not found: ID does not exist" containerID="e6cd37fa4f8e65ec04f49baab6bc1244fe29b2cc9a33c0b2d0274916ebccdae2" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.352765 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6cd37fa4f8e65ec04f49baab6bc1244fe29b2cc9a33c0b2d0274916ebccdae2"} err="failed to get container status \"e6cd37fa4f8e65ec04f49baab6bc1244fe29b2cc9a33c0b2d0274916ebccdae2\": rpc error: code = NotFound desc = could 
not find container \"e6cd37fa4f8e65ec04f49baab6bc1244fe29b2cc9a33c0b2d0274916ebccdae2\": container with ID starting with e6cd37fa4f8e65ec04f49baab6bc1244fe29b2cc9a33c0b2d0274916ebccdae2 not found: ID does not exist" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.352789 4687 scope.go:117] "RemoveContainer" containerID="9f335559b5ff5029358a9e0e948294970ccfe0b00d7a172f83737a45f431d48c" Nov 25 09:22:24 crc kubenswrapper[4687]: E1125 09:22:24.353206 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f335559b5ff5029358a9e0e948294970ccfe0b00d7a172f83737a45f431d48c\": container with ID starting with 9f335559b5ff5029358a9e0e948294970ccfe0b00d7a172f83737a45f431d48c not found: ID does not exist" containerID="9f335559b5ff5029358a9e0e948294970ccfe0b00d7a172f83737a45f431d48c" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.353231 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f335559b5ff5029358a9e0e948294970ccfe0b00d7a172f83737a45f431d48c"} err="failed to get container status \"9f335559b5ff5029358a9e0e948294970ccfe0b00d7a172f83737a45f431d48c\": rpc error: code = NotFound desc = could not find container \"9f335559b5ff5029358a9e0e948294970ccfe0b00d7a172f83737a45f431d48c\": container with ID starting with 9f335559b5ff5029358a9e0e948294970ccfe0b00d7a172f83737a45f431d48c not found: ID does not exist" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.365034 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-xdshj"] Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.368534 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4p2t6\" (UniqueName: \"kubernetes.io/projected/95391d75-b60d-49a7-892d-4236d940363c-kube-api-access-4p2t6\") pod \"cinder-de73-account-create-p9ft5\" (UID: \"95391d75-b60d-49a7-892d-4236d940363c\") " pod="openstack/cinder-de73-account-create-p9ft5" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.371833 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-xdshj"] Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.430612 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-gtv52" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.434461 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kw6xn\" (UniqueName: \"kubernetes.io/projected/920e2328-777b-4ee4-8bda-f5dc74435740-kube-api-access-kw6xn\") pod \"neutron-4573-account-create-chmkq\" (UID: \"920e2328-777b-4ee4-8bda-f5dc74435740\") " pod="openstack/neutron-4573-account-create-chmkq" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.434529 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66108854-2b2f-49c8-be7a-b80e336dc27b-combined-ca-bundle\") pod \"keystone-db-sync-pstdr\" (UID: \"66108854-2b2f-49c8-be7a-b80e336dc27b\") " pod="openstack/keystone-db-sync-pstdr" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.434572 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66108854-2b2f-49c8-be7a-b80e336dc27b-config-data\") pod \"keystone-db-sync-pstdr\" (UID: \"66108854-2b2f-49c8-be7a-b80e336dc27b\") " pod="openstack/keystone-db-sync-pstdr" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.434597 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/920e2328-777b-4ee4-8bda-f5dc74435740-operator-scripts\") pod \"neutron-4573-account-create-chmkq\" (UID: \"920e2328-777b-4ee4-8bda-f5dc74435740\") " pod="openstack/neutron-4573-account-create-chmkq" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.434632 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtrlj\" (UniqueName: \"kubernetes.io/projected/66108854-2b2f-49c8-be7a-b80e336dc27b-kube-api-access-gtrlj\") pod \"keystone-db-sync-pstdr\" (UID: \"66108854-2b2f-49c8-be7a-b80e336dc27b\") " pod="openstack/keystone-db-sync-pstdr" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.438567 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66108854-2b2f-49c8-be7a-b80e336dc27b-combined-ca-bundle\") pod \"keystone-db-sync-pstdr\" (UID: \"66108854-2b2f-49c8-be7a-b80e336dc27b\") " pod="openstack/keystone-db-sync-pstdr" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.439844 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-de73-account-create-p9ft5" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.442781 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66108854-2b2f-49c8-be7a-b80e336dc27b-config-data\") pod \"keystone-db-sync-pstdr\" (UID: \"66108854-2b2f-49c8-be7a-b80e336dc27b\") " pod="openstack/keystone-db-sync-pstdr" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.453119 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtrlj\" (UniqueName: \"kubernetes.io/projected/66108854-2b2f-49c8-be7a-b80e336dc27b-kube-api-access-gtrlj\") pod \"keystone-db-sync-pstdr\" (UID: \"66108854-2b2f-49c8-be7a-b80e336dc27b\") " pod="openstack/keystone-db-sync-pstdr" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.535698 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kw6xn\" (UniqueName: \"kubernetes.io/projected/920e2328-777b-4ee4-8bda-f5dc74435740-kube-api-access-kw6xn\") pod \"neutron-4573-account-create-chmkq\" (UID: \"920e2328-777b-4ee4-8bda-f5dc74435740\") " pod="openstack/neutron-4573-account-create-chmkq" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.535803 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/920e2328-777b-4ee4-8bda-f5dc74435740-operator-scripts\") pod \"neutron-4573-account-create-chmkq\" (UID: \"920e2328-777b-4ee4-8bda-f5dc74435740\") " pod="openstack/neutron-4573-account-create-chmkq" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.536496 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/920e2328-777b-4ee4-8bda-f5dc74435740-operator-scripts\") pod \"neutron-4573-account-create-chmkq\" (UID: \"920e2328-777b-4ee4-8bda-f5dc74435740\") " pod="openstack/neutron-4573-account-create-chmkq" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.557408 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kw6xn\" (UniqueName: \"kubernetes.io/projected/920e2328-777b-4ee4-8bda-f5dc74435740-kube-api-access-kw6xn\") pod \"neutron-4573-account-create-chmkq\" (UID: \"920e2328-777b-4ee4-8bda-f5dc74435740\") " pod="openstack/neutron-4573-account-create-chmkq" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.589096 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-pstdr" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.597920 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-twnk8"] Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.637483 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-4573-account-create-chmkq" Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.851321 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-e3fb-account-create-w7xhv"] Nov 25 09:22:24 crc kubenswrapper[4687]: I1125 09:22:24.976307 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-86dxw"] Nov 25 09:22:25 crc kubenswrapper[4687]: I1125 09:22:25.029201 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-gtv52"] Nov 25 09:22:25 crc kubenswrapper[4687]: I1125 09:22:25.104199 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-de73-account-create-p9ft5"] Nov 25 09:22:25 crc kubenswrapper[4687]: I1125 09:22:25.280362 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-4573-account-create-chmkq"] Nov 25 09:22:25 crc kubenswrapper[4687]: I1125 09:22:25.301180 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-gtv52" event={"ID":"57674a17-12fc-4749-bcc9-19a92f1c5016","Type":"ContainerStarted","Data":"1622d1de0c191c4fdebc1b0f113ba993d3cdf00b40d57882004687d7803a9a55"} Nov 25 09:22:25 crc kubenswrapper[4687]: I1125 09:22:25.306577 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-e3fb-account-create-w7xhv" event={"ID":"ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c","Type":"ContainerStarted","Data":"8f5df09239f188ac4bbdd09c73782c357a8b598255bb9101a0d6ee02f8093546"} Nov 25 09:22:25 crc kubenswrapper[4687]: W1125 09:22:25.307134 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod920e2328_777b_4ee4_8bda_f5dc74435740.slice/crio-36e4adcdfc3333189068991e54d102adcd3412691ff6deb7222ddfdebe51abe5 WatchSource:0}: Error finding container 36e4adcdfc3333189068991e54d102adcd3412691ff6deb7222ddfdebe51abe5: Status 404 returned error can't find the container with id 36e4adcdfc3333189068991e54d102adcd3412691ff6deb7222ddfdebe51abe5 Nov 25 09:22:25 crc kubenswrapper[4687]: I1125 09:22:25.318643 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-86dxw" event={"ID":"1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf","Type":"ContainerStarted","Data":"d33a9d2f4541e976ebeacdd7c3236eccbbd192fee65ebbc2dace0e719660f92d"} Nov 25 09:22:25 crc kubenswrapper[4687]: I1125 09:22:25.320475 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-de73-account-create-p9ft5" event={"ID":"95391d75-b60d-49a7-892d-4236d940363c","Type":"ContainerStarted","Data":"13e7ace4f8c3737d6804d8d449c63389d28edfd89f8499fb74346de73d76ec4e"} Nov 25 09:22:25 crc kubenswrapper[4687]: I1125 09:22:25.324120 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-twnk8" event={"ID":"10b1fc74-8906-4881-a45c-e812501c6f2f","Type":"ContainerStarted","Data":"d1e7ed6f76d1852f73fe9a4de87d76c3f9ca66778fd1d386023a31c1b5112e2b"} Nov 25 09:22:25 crc kubenswrapper[4687]: I1125 09:22:25.324183 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-twnk8" event={"ID":"10b1fc74-8906-4881-a45c-e812501c6f2f","Type":"ContainerStarted","Data":"f41bbfa11ab0ad8d48b82157eb791a3da6c072ce538ff1e40b85a79eb8698d24"} Nov 25 09:22:25 crc kubenswrapper[4687]: I1125 09:22:25.327702 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-pstdr"] Nov 25 09:22:25 crc kubenswrapper[4687]: I1125 09:22:25.744760 4687 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="206cc7c7-5ea3-4300-8017-d6dc3a5c279e" path="/var/lib/kubelet/pods/206cc7c7-5ea3-4300-8017-d6dc3a5c279e/volumes" Nov 25 09:22:25 crc kubenswrapper[4687]: I1125 09:22:25.762899 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-twnk8" podStartSLOduration=2.762879056 podStartE2EDuration="2.762879056s" podCreationTimestamp="2025-11-25 09:22:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:22:25.341842086 +0000 UTC m=+1140.395481804" watchObservedRunningTime="2025-11-25 09:22:25.762879056 +0000 UTC m=+1140.816518774" Nov 25 09:22:26 crc kubenswrapper[4687]: I1125 09:22:26.334282 4687 generic.go:334] "Generic (PLEG): container finished" podID="95391d75-b60d-49a7-892d-4236d940363c" containerID="3a669b3bdbbfd9112d5f4ac21b84ed14248341d3c43fb8c11364760c13fc25da" exitCode=0 Nov 25 09:22:26 crc kubenswrapper[4687]: I1125 09:22:26.334756 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-de73-account-create-p9ft5" event={"ID":"95391d75-b60d-49a7-892d-4236d940363c","Type":"ContainerDied","Data":"3a669b3bdbbfd9112d5f4ac21b84ed14248341d3c43fb8c11364760c13fc25da"} Nov 25 09:22:26 crc kubenswrapper[4687]: I1125 09:22:26.336713 4687 generic.go:334] "Generic (PLEG): container finished" podID="10b1fc74-8906-4881-a45c-e812501c6f2f" containerID="d1e7ed6f76d1852f73fe9a4de87d76c3f9ca66778fd1d386023a31c1b5112e2b" exitCode=0 Nov 25 09:22:26 crc kubenswrapper[4687]: I1125 09:22:26.336853 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-twnk8" event={"ID":"10b1fc74-8906-4881-a45c-e812501c6f2f","Type":"ContainerDied","Data":"d1e7ed6f76d1852f73fe9a4de87d76c3f9ca66778fd1d386023a31c1b5112e2b"} Nov 25 09:22:26 crc kubenswrapper[4687]: I1125 09:22:26.339277 4687 generic.go:334] "Generic (PLEG): container finished" podID="920e2328-777b-4ee4-8bda-f5dc74435740" containerID="5625287b135b93d6bcf52e4fa8cd16090ebdf51ec368497b1d478502f2036f0b" exitCode=0 Nov 25 09:22:26 crc kubenswrapper[4687]: I1125 09:22:26.339333 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-4573-account-create-chmkq" event={"ID":"920e2328-777b-4ee4-8bda-f5dc74435740","Type":"ContainerDied","Data":"5625287b135b93d6bcf52e4fa8cd16090ebdf51ec368497b1d478502f2036f0b"} Nov 25 09:22:26 crc kubenswrapper[4687]: I1125 09:22:26.339359 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-4573-account-create-chmkq" event={"ID":"920e2328-777b-4ee4-8bda-f5dc74435740","Type":"ContainerStarted","Data":"36e4adcdfc3333189068991e54d102adcd3412691ff6deb7222ddfdebe51abe5"} Nov 25 09:22:26 crc kubenswrapper[4687]: I1125 09:22:26.343681 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-pstdr" event={"ID":"66108854-2b2f-49c8-be7a-b80e336dc27b","Type":"ContainerStarted","Data":"82ecc1f88545c4c052b3caf4466185a9a8ba9f54035091efbfaa0791dca3a8b8"} Nov 25 09:22:26 crc kubenswrapper[4687]: I1125 09:22:26.347477 4687 generic.go:334] "Generic (PLEG): container finished" podID="57674a17-12fc-4749-bcc9-19a92f1c5016" containerID="77036674894a1e19533a43aaaf9c024bf19dc8d581ee94d5f53ec3a9c2fcf466" exitCode=0 Nov 25 09:22:26 crc kubenswrapper[4687]: I1125 09:22:26.347574 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-gtv52" 
event={"ID":"57674a17-12fc-4749-bcc9-19a92f1c5016","Type":"ContainerDied","Data":"77036674894a1e19533a43aaaf9c024bf19dc8d581ee94d5f53ec3a9c2fcf466"} Nov 25 09:22:26 crc kubenswrapper[4687]: I1125 09:22:26.349232 4687 generic.go:334] "Generic (PLEG): container finished" podID="ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c" containerID="c7ccf1d84702ff8e9d7c8a779246903c8ed9393e0e9556c3469fcce052c98806" exitCode=0 Nov 25 09:22:26 crc kubenswrapper[4687]: I1125 09:22:26.349318 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-e3fb-account-create-w7xhv" event={"ID":"ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c","Type":"ContainerDied","Data":"c7ccf1d84702ff8e9d7c8a779246903c8ed9393e0e9556c3469fcce052c98806"} Nov 25 09:22:26 crc kubenswrapper[4687]: I1125 09:22:26.356361 4687 generic.go:334] "Generic (PLEG): container finished" podID="1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf" containerID="ff8f7d54639c140489b1e10a5729e8b959822917f0b9761b9848307e7c07f584" exitCode=0 Nov 25 09:22:26 crc kubenswrapper[4687]: I1125 09:22:26.356408 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-86dxw" event={"ID":"1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf","Type":"ContainerDied","Data":"ff8f7d54639c140489b1e10a5729e8b959822917f0b9761b9848307e7c07f584"} Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.391776 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-86dxw" event={"ID":"1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf","Type":"ContainerDied","Data":"d33a9d2f4541e976ebeacdd7c3236eccbbd192fee65ebbc2dace0e719660f92d"} Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.392309 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d33a9d2f4541e976ebeacdd7c3236eccbbd192fee65ebbc2dace0e719660f92d" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.393661 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-e3fb-account-create-w7xhv" event={"ID":"ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c","Type":"ContainerDied","Data":"8f5df09239f188ac4bbdd09c73782c357a8b598255bb9101a0d6ee02f8093546"} Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.393688 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8f5df09239f188ac4bbdd09c73782c357a8b598255bb9101a0d6ee02f8093546" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.395035 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-de73-account-create-p9ft5" event={"ID":"95391d75-b60d-49a7-892d-4236d940363c","Type":"ContainerDied","Data":"13e7ace4f8c3737d6804d8d449c63389d28edfd89f8499fb74346de73d76ec4e"} Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.395078 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="13e7ace4f8c3737d6804d8d449c63389d28edfd89f8499fb74346de73d76ec4e" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.397667 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-twnk8" event={"ID":"10b1fc74-8906-4881-a45c-e812501c6f2f","Type":"ContainerDied","Data":"f41bbfa11ab0ad8d48b82157eb791a3da6c072ce538ff1e40b85a79eb8698d24"} Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.397702 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f41bbfa11ab0ad8d48b82157eb791a3da6c072ce538ff1e40b85a79eb8698d24" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.399530 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/neutron-4573-account-create-chmkq" event={"ID":"920e2328-777b-4ee4-8bda-f5dc74435740","Type":"ContainerDied","Data":"36e4adcdfc3333189068991e54d102adcd3412691ff6deb7222ddfdebe51abe5"} Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.399551 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="36e4adcdfc3333189068991e54d102adcd3412691ff6deb7222ddfdebe51abe5" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.402613 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-gtv52" event={"ID":"57674a17-12fc-4749-bcc9-19a92f1c5016","Type":"ContainerDied","Data":"1622d1de0c191c4fdebc1b0f113ba993d3cdf00b40d57882004687d7803a9a55"} Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.402643 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1622d1de0c191c4fdebc1b0f113ba993d3cdf00b40d57882004687d7803a9a55" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.522375 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e3fb-account-create-w7xhv" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.625686 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-4573-account-create-chmkq" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.634381 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-de73-account-create-p9ft5" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.642442 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c-operator-scripts\") pod \"ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c\" (UID: \"ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c\") " Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.642667 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7fnk\" (UniqueName: \"kubernetes.io/projected/ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c-kube-api-access-w7fnk\") pod \"ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c\" (UID: \"ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c\") " Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.643319 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c" (UID: "ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.646066 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c-kube-api-access-w7fnk" (OuterVolumeSpecName: "kube-api-access-w7fnk") pod "ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c" (UID: "ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c"). InnerVolumeSpecName "kube-api-access-w7fnk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.664140 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-gtv52" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.674596 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-86dxw" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.685399 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-twnk8" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.744620 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kw6xn\" (UniqueName: \"kubernetes.io/projected/920e2328-777b-4ee4-8bda-f5dc74435740-kube-api-access-kw6xn\") pod \"920e2328-777b-4ee4-8bda-f5dc74435740\" (UID: \"920e2328-777b-4ee4-8bda-f5dc74435740\") " Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.744672 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nnswn\" (UniqueName: \"kubernetes.io/projected/57674a17-12fc-4749-bcc9-19a92f1c5016-kube-api-access-nnswn\") pod \"57674a17-12fc-4749-bcc9-19a92f1c5016\" (UID: \"57674a17-12fc-4749-bcc9-19a92f1c5016\") " Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.744753 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57674a17-12fc-4749-bcc9-19a92f1c5016-operator-scripts\") pod \"57674a17-12fc-4749-bcc9-19a92f1c5016\" (UID: \"57674a17-12fc-4749-bcc9-19a92f1c5016\") " Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.744782 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4p2t6\" (UniqueName: \"kubernetes.io/projected/95391d75-b60d-49a7-892d-4236d940363c-kube-api-access-4p2t6\") pod \"95391d75-b60d-49a7-892d-4236d940363c\" (UID: \"95391d75-b60d-49a7-892d-4236d940363c\") " Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.744807 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10b1fc74-8906-4881-a45c-e812501c6f2f-operator-scripts\") pod \"10b1fc74-8906-4881-a45c-e812501c6f2f\" (UID: \"10b1fc74-8906-4881-a45c-e812501c6f2f\") " Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.744824 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xt4jn\" (UniqueName: \"kubernetes.io/projected/10b1fc74-8906-4881-a45c-e812501c6f2f-kube-api-access-xt4jn\") pod \"10b1fc74-8906-4881-a45c-e812501c6f2f\" (UID: \"10b1fc74-8906-4881-a45c-e812501c6f2f\") " Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.744856 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/920e2328-777b-4ee4-8bda-f5dc74435740-operator-scripts\") pod \"920e2328-777b-4ee4-8bda-f5dc74435740\" (UID: \"920e2328-777b-4ee4-8bda-f5dc74435740\") " Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.744894 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctnhn\" (UniqueName: \"kubernetes.io/projected/1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf-kube-api-access-ctnhn\") pod \"1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf\" (UID: \"1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf\") " Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.744924 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf-operator-scripts\") pod \"1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf\" (UID: \"1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf\") " Nov 25 09:22:30 crc 
kubenswrapper[4687]: I1125 09:22:30.744982 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/95391d75-b60d-49a7-892d-4236d940363c-operator-scripts\") pod \"95391d75-b60d-49a7-892d-4236d940363c\" (UID: \"95391d75-b60d-49a7-892d-4236d940363c\") " Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.745258 4687 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.745546 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7fnk\" (UniqueName: \"kubernetes.io/projected/ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c-kube-api-access-w7fnk\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.745866 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10b1fc74-8906-4881-a45c-e812501c6f2f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "10b1fc74-8906-4881-a45c-e812501c6f2f" (UID: "10b1fc74-8906-4881-a45c-e812501c6f2f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.745967 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/95391d75-b60d-49a7-892d-4236d940363c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "95391d75-b60d-49a7-892d-4236d940363c" (UID: "95391d75-b60d-49a7-892d-4236d940363c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.748494 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/920e2328-777b-4ee4-8bda-f5dc74435740-kube-api-access-kw6xn" (OuterVolumeSpecName: "kube-api-access-kw6xn") pod "920e2328-777b-4ee4-8bda-f5dc74435740" (UID: "920e2328-777b-4ee4-8bda-f5dc74435740"). InnerVolumeSpecName "kube-api-access-kw6xn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.748983 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10b1fc74-8906-4881-a45c-e812501c6f2f-kube-api-access-xt4jn" (OuterVolumeSpecName: "kube-api-access-xt4jn") pod "10b1fc74-8906-4881-a45c-e812501c6f2f" (UID: "10b1fc74-8906-4881-a45c-e812501c6f2f"). InnerVolumeSpecName "kube-api-access-xt4jn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.749242 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/57674a17-12fc-4749-bcc9-19a92f1c5016-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "57674a17-12fc-4749-bcc9-19a92f1c5016" (UID: "57674a17-12fc-4749-bcc9-19a92f1c5016"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.749341 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/920e2328-777b-4ee4-8bda-f5dc74435740-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "920e2328-777b-4ee4-8bda-f5dc74435740" (UID: "920e2328-777b-4ee4-8bda-f5dc74435740"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.749473 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57674a17-12fc-4749-bcc9-19a92f1c5016-kube-api-access-nnswn" (OuterVolumeSpecName: "kube-api-access-nnswn") pod "57674a17-12fc-4749-bcc9-19a92f1c5016" (UID: "57674a17-12fc-4749-bcc9-19a92f1c5016"). InnerVolumeSpecName "kube-api-access-nnswn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.749859 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf" (UID: "1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.752912 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95391d75-b60d-49a7-892d-4236d940363c-kube-api-access-4p2t6" (OuterVolumeSpecName: "kube-api-access-4p2t6") pod "95391d75-b60d-49a7-892d-4236d940363c" (UID: "95391d75-b60d-49a7-892d-4236d940363c"). InnerVolumeSpecName "kube-api-access-4p2t6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.754455 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf-kube-api-access-ctnhn" (OuterVolumeSpecName: "kube-api-access-ctnhn") pod "1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf" (UID: "1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf"). InnerVolumeSpecName "kube-api-access-ctnhn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.847032 4687 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57674a17-12fc-4749-bcc9-19a92f1c5016-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.847827 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4p2t6\" (UniqueName: \"kubernetes.io/projected/95391d75-b60d-49a7-892d-4236d940363c-kube-api-access-4p2t6\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.847840 4687 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10b1fc74-8906-4881-a45c-e812501c6f2f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.847849 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xt4jn\" (UniqueName: \"kubernetes.io/projected/10b1fc74-8906-4881-a45c-e812501c6f2f-kube-api-access-xt4jn\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.847857 4687 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/920e2328-777b-4ee4-8bda-f5dc74435740-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.847868 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctnhn\" (UniqueName: \"kubernetes.io/projected/1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf-kube-api-access-ctnhn\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.847877 4687 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.847886 4687 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/95391d75-b60d-49a7-892d-4236d940363c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.848169 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kw6xn\" (UniqueName: \"kubernetes.io/projected/920e2328-777b-4ee4-8bda-f5dc74435740-kube-api-access-kw6xn\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:30 crc kubenswrapper[4687]: I1125 09:22:30.848183 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nnswn\" (UniqueName: \"kubernetes.io/projected/57674a17-12fc-4749-bcc9-19a92f1c5016-kube-api-access-nnswn\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:31 crc kubenswrapper[4687]: I1125 09:22:31.412144 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-gtv52" Nov 25 09:22:31 crc kubenswrapper[4687]: I1125 09:22:31.412173 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-pstdr" event={"ID":"66108854-2b2f-49c8-be7a-b80e336dc27b","Type":"ContainerStarted","Data":"1146d2e7a146cc0e8fcc48e0a7b1f5e635536b39096eff2f449140de8e3fe0f4"} Nov 25 09:22:31 crc kubenswrapper[4687]: I1125 09:22:31.412201 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-e3fb-account-create-w7xhv" Nov 25 09:22:31 crc kubenswrapper[4687]: I1125 09:22:31.412211 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-4573-account-create-chmkq" Nov 25 09:22:31 crc kubenswrapper[4687]: I1125 09:22:31.412161 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-86dxw" Nov 25 09:22:31 crc kubenswrapper[4687]: I1125 09:22:31.412156 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-twnk8" Nov 25 09:22:31 crc kubenswrapper[4687]: I1125 09:22:31.412359 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-de73-account-create-p9ft5" Nov 25 09:22:31 crc kubenswrapper[4687]: I1125 09:22:31.434607 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-pstdr" podStartSLOduration=2.375003764 podStartE2EDuration="7.434586523s" podCreationTimestamp="2025-11-25 09:22:24 +0000 UTC" firstStartedPulling="2025-11-25 09:22:25.35624897 +0000 UTC m=+1140.409888688" lastFinishedPulling="2025-11-25 09:22:30.415831729 +0000 UTC m=+1145.469471447" observedRunningTime="2025-11-25 09:22:31.430264345 +0000 UTC m=+1146.483904073" watchObservedRunningTime="2025-11-25 09:22:31.434586523 +0000 UTC m=+1146.488226241" Nov 25 09:22:31 crc kubenswrapper[4687]: I1125 09:22:31.995936 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:22:32 crc kubenswrapper[4687]: I1125 09:22:32.059724 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-tk9nm"] Nov 25 09:22:32 crc kubenswrapper[4687]: I1125 09:22:32.060091 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" podUID="e3000b5f-af25-46df-9c97-42da2552090b" containerName="dnsmasq-dns" containerID="cri-o://0d37246c20425f7f23148c6b8bf89892a5f546115351c342cf9c5ad1962e69b9" gracePeriod=10 Nov 25 09:22:32 crc kubenswrapper[4687]: I1125 09:22:32.441012 4687 generic.go:334] "Generic (PLEG): container finished" podID="e3000b5f-af25-46df-9c97-42da2552090b" containerID="0d37246c20425f7f23148c6b8bf89892a5f546115351c342cf9c5ad1962e69b9" exitCode=0 Nov 25 09:22:32 crc kubenswrapper[4687]: I1125 09:22:32.441765 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" event={"ID":"e3000b5f-af25-46df-9c97-42da2552090b","Type":"ContainerDied","Data":"0d37246c20425f7f23148c6b8bf89892a5f546115351c342cf9c5ad1962e69b9"} Nov 25 09:22:32 crc kubenswrapper[4687]: I1125 09:22:32.501400 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" Nov 25 09:22:32 crc kubenswrapper[4687]: I1125 09:22:32.580049 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-config\") pod \"e3000b5f-af25-46df-9c97-42da2552090b\" (UID: \"e3000b5f-af25-46df-9c97-42da2552090b\") " Nov 25 09:22:32 crc kubenswrapper[4687]: I1125 09:22:32.580102 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-smm5m\" (UniqueName: \"kubernetes.io/projected/e3000b5f-af25-46df-9c97-42da2552090b-kube-api-access-smm5m\") pod \"e3000b5f-af25-46df-9c97-42da2552090b\" (UID: \"e3000b5f-af25-46df-9c97-42da2552090b\") " Nov 25 09:22:32 crc kubenswrapper[4687]: I1125 09:22:32.580185 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-ovsdbserver-sb\") pod \"e3000b5f-af25-46df-9c97-42da2552090b\" (UID: \"e3000b5f-af25-46df-9c97-42da2552090b\") " Nov 25 09:22:32 crc kubenswrapper[4687]: I1125 09:22:32.580251 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-dns-svc\") pod \"e3000b5f-af25-46df-9c97-42da2552090b\" (UID: \"e3000b5f-af25-46df-9c97-42da2552090b\") " Nov 25 09:22:32 crc kubenswrapper[4687]: I1125 09:22:32.580311 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-ovsdbserver-nb\") pod \"e3000b5f-af25-46df-9c97-42da2552090b\" (UID: \"e3000b5f-af25-46df-9c97-42da2552090b\") " Nov 25 09:22:32 crc kubenswrapper[4687]: I1125 09:22:32.595720 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3000b5f-af25-46df-9c97-42da2552090b-kube-api-access-smm5m" (OuterVolumeSpecName: "kube-api-access-smm5m") pod "e3000b5f-af25-46df-9c97-42da2552090b" (UID: "e3000b5f-af25-46df-9c97-42da2552090b"). InnerVolumeSpecName "kube-api-access-smm5m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:22:32 crc kubenswrapper[4687]: I1125 09:22:32.621522 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e3000b5f-af25-46df-9c97-42da2552090b" (UID: "e3000b5f-af25-46df-9c97-42da2552090b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:32 crc kubenswrapper[4687]: I1125 09:22:32.622960 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e3000b5f-af25-46df-9c97-42da2552090b" (UID: "e3000b5f-af25-46df-9c97-42da2552090b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:32 crc kubenswrapper[4687]: I1125 09:22:32.625958 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-config" (OuterVolumeSpecName: "config") pod "e3000b5f-af25-46df-9c97-42da2552090b" (UID: "e3000b5f-af25-46df-9c97-42da2552090b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:32 crc kubenswrapper[4687]: I1125 09:22:32.631288 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e3000b5f-af25-46df-9c97-42da2552090b" (UID: "e3000b5f-af25-46df-9c97-42da2552090b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:32 crc kubenswrapper[4687]: I1125 09:22:32.683429 4687 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:32 crc kubenswrapper[4687]: I1125 09:22:32.683843 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:32 crc kubenswrapper[4687]: I1125 09:22:32.683859 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:32 crc kubenswrapper[4687]: I1125 09:22:32.683875 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-smm5m\" (UniqueName: \"kubernetes.io/projected/e3000b5f-af25-46df-9c97-42da2552090b-kube-api-access-smm5m\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:32 crc kubenswrapper[4687]: I1125 09:22:32.683888 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e3000b5f-af25-46df-9c97-42da2552090b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:33 crc kubenswrapper[4687]: I1125 09:22:33.455841 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" event={"ID":"e3000b5f-af25-46df-9c97-42da2552090b","Type":"ContainerDied","Data":"3063516fd0ab1c82c3176245c2335fe4566aa336399e6711de80eda557bf292a"} Nov 25 09:22:33 crc kubenswrapper[4687]: I1125 09:22:33.455974 4687 scope.go:117] "RemoveContainer" containerID="0d37246c20425f7f23148c6b8bf89892a5f546115351c342cf9c5ad1962e69b9" Nov 25 09:22:33 crc kubenswrapper[4687]: I1125 09:22:33.456288 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-tk9nm" Nov 25 09:22:33 crc kubenswrapper[4687]: I1125 09:22:33.493661 4687 scope.go:117] "RemoveContainer" containerID="995a538132398c4fa35d58c6a5a4c4ed9ccbbc20e702e847c87aba05708147e9" Nov 25 09:22:33 crc kubenswrapper[4687]: I1125 09:22:33.520935 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-tk9nm"] Nov 25 09:22:33 crc kubenswrapper[4687]: I1125 09:22:33.533340 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-tk9nm"] Nov 25 09:22:33 crc kubenswrapper[4687]: I1125 09:22:33.747420 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3000b5f-af25-46df-9c97-42da2552090b" path="/var/lib/kubelet/pods/e3000b5f-af25-46df-9c97-42da2552090b/volumes" Nov 25 09:22:35 crc kubenswrapper[4687]: I1125 09:22:35.481792 4687 generic.go:334] "Generic (PLEG): container finished" podID="66108854-2b2f-49c8-be7a-b80e336dc27b" containerID="1146d2e7a146cc0e8fcc48e0a7b1f5e635536b39096eff2f449140de8e3fe0f4" exitCode=0 Nov 25 09:22:35 crc kubenswrapper[4687]: I1125 09:22:35.481901 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-pstdr" event={"ID":"66108854-2b2f-49c8-be7a-b80e336dc27b","Type":"ContainerDied","Data":"1146d2e7a146cc0e8fcc48e0a7b1f5e635536b39096eff2f449140de8e3fe0f4"} Nov 25 09:22:36 crc kubenswrapper[4687]: I1125 09:22:36.820928 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-pstdr" Nov 25 09:22:36 crc kubenswrapper[4687]: I1125 09:22:36.886827 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtrlj\" (UniqueName: \"kubernetes.io/projected/66108854-2b2f-49c8-be7a-b80e336dc27b-kube-api-access-gtrlj\") pod \"66108854-2b2f-49c8-be7a-b80e336dc27b\" (UID: \"66108854-2b2f-49c8-be7a-b80e336dc27b\") " Nov 25 09:22:36 crc kubenswrapper[4687]: I1125 09:22:36.886881 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66108854-2b2f-49c8-be7a-b80e336dc27b-combined-ca-bundle\") pod \"66108854-2b2f-49c8-be7a-b80e336dc27b\" (UID: \"66108854-2b2f-49c8-be7a-b80e336dc27b\") " Nov 25 09:22:36 crc kubenswrapper[4687]: I1125 09:22:36.886945 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66108854-2b2f-49c8-be7a-b80e336dc27b-config-data\") pod \"66108854-2b2f-49c8-be7a-b80e336dc27b\" (UID: \"66108854-2b2f-49c8-be7a-b80e336dc27b\") " Nov 25 09:22:36 crc kubenswrapper[4687]: I1125 09:22:36.893795 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66108854-2b2f-49c8-be7a-b80e336dc27b-kube-api-access-gtrlj" (OuterVolumeSpecName: "kube-api-access-gtrlj") pod "66108854-2b2f-49c8-be7a-b80e336dc27b" (UID: "66108854-2b2f-49c8-be7a-b80e336dc27b"). InnerVolumeSpecName "kube-api-access-gtrlj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:22:36 crc kubenswrapper[4687]: I1125 09:22:36.918268 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66108854-2b2f-49c8-be7a-b80e336dc27b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "66108854-2b2f-49c8-be7a-b80e336dc27b" (UID: "66108854-2b2f-49c8-be7a-b80e336dc27b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:22:36 crc kubenswrapper[4687]: I1125 09:22:36.954586 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66108854-2b2f-49c8-be7a-b80e336dc27b-config-data" (OuterVolumeSpecName: "config-data") pod "66108854-2b2f-49c8-be7a-b80e336dc27b" (UID: "66108854-2b2f-49c8-be7a-b80e336dc27b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:22:36 crc kubenswrapper[4687]: I1125 09:22:36.989251 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtrlj\" (UniqueName: \"kubernetes.io/projected/66108854-2b2f-49c8-be7a-b80e336dc27b-kube-api-access-gtrlj\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:36 crc kubenswrapper[4687]: I1125 09:22:36.989620 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66108854-2b2f-49c8-be7a-b80e336dc27b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:36 crc kubenswrapper[4687]: I1125 09:22:36.989630 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66108854-2b2f-49c8-be7a-b80e336dc27b-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.499899 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-pstdr" event={"ID":"66108854-2b2f-49c8-be7a-b80e336dc27b","Type":"ContainerDied","Data":"82ecc1f88545c4c052b3caf4466185a9a8ba9f54035091efbfaa0791dca3a8b8"} Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.499939 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="82ecc1f88545c4c052b3caf4466185a9a8ba9f54035091efbfaa0791dca3a8b8" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.499979 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-pstdr" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.777799 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-4zwjp"] Nov 25 09:22:37 crc kubenswrapper[4687]: E1125 09:22:37.778182 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10b1fc74-8906-4881-a45c-e812501c6f2f" containerName="mariadb-database-create" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.778201 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="10b1fc74-8906-4881-a45c-e812501c6f2f" containerName="mariadb-database-create" Nov 25 09:22:37 crc kubenswrapper[4687]: E1125 09:22:37.778213 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57674a17-12fc-4749-bcc9-19a92f1c5016" containerName="mariadb-database-create" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.778219 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="57674a17-12fc-4749-bcc9-19a92f1c5016" containerName="mariadb-database-create" Nov 25 09:22:37 crc kubenswrapper[4687]: E1125 09:22:37.778232 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c" containerName="mariadb-account-create" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.778237 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c" containerName="mariadb-account-create" Nov 25 09:22:37 crc kubenswrapper[4687]: E1125 09:22:37.778251 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="920e2328-777b-4ee4-8bda-f5dc74435740" containerName="mariadb-account-create" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.778257 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="920e2328-777b-4ee4-8bda-f5dc74435740" containerName="mariadb-account-create" Nov 25 09:22:37 crc kubenswrapper[4687]: E1125 09:22:37.778273 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66108854-2b2f-49c8-be7a-b80e336dc27b" containerName="keystone-db-sync" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.778280 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="66108854-2b2f-49c8-be7a-b80e336dc27b" containerName="keystone-db-sync" Nov 25 09:22:37 crc kubenswrapper[4687]: E1125 09:22:37.778290 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3000b5f-af25-46df-9c97-42da2552090b" containerName="init" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.778296 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3000b5f-af25-46df-9c97-42da2552090b" containerName="init" Nov 25 09:22:37 crc kubenswrapper[4687]: E1125 09:22:37.778314 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf" containerName="mariadb-database-create" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.778319 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf" containerName="mariadb-database-create" Nov 25 09:22:37 crc kubenswrapper[4687]: E1125 09:22:37.778329 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95391d75-b60d-49a7-892d-4236d940363c" containerName="mariadb-account-create" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.778334 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="95391d75-b60d-49a7-892d-4236d940363c" containerName="mariadb-account-create" Nov 25 09:22:37 crc kubenswrapper[4687]: E1125 09:22:37.778346 4687 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3000b5f-af25-46df-9c97-42da2552090b" containerName="dnsmasq-dns" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.778351 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3000b5f-af25-46df-9c97-42da2552090b" containerName="dnsmasq-dns" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.778543 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="920e2328-777b-4ee4-8bda-f5dc74435740" containerName="mariadb-account-create" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.778556 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="57674a17-12fc-4749-bcc9-19a92f1c5016" containerName="mariadb-database-create" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.778565 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="95391d75-b60d-49a7-892d-4236d940363c" containerName="mariadb-account-create" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.778573 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="10b1fc74-8906-4881-a45c-e812501c6f2f" containerName="mariadb-database-create" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.778585 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c" containerName="mariadb-account-create" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.778592 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="66108854-2b2f-49c8-be7a-b80e336dc27b" containerName="keystone-db-sync" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.778603 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3000b5f-af25-46df-9c97-42da2552090b" containerName="dnsmasq-dns" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.778614 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf" containerName="mariadb-database-create" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.779400 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.801867 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-sb55x"] Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.803001 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-sb55x" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.806020 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.806287 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.806402 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.806820 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.806938 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-l777p" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.820776 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-4zwjp"] Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.845325 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-sb55x"] Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.907398 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-credential-keys\") pod \"keystone-bootstrap-sb55x\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " pod="openstack/keystone-bootstrap-sb55x" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.907460 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-ovsdbserver-sb\") pod \"dnsmasq-dns-bbf5cc879-4zwjp\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.907605 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-scripts\") pod \"keystone-bootstrap-sb55x\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " pod="openstack/keystone-bootstrap-sb55x" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.907672 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-dns-swift-storage-0\") pod \"dnsmasq-dns-bbf5cc879-4zwjp\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.907729 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-fernet-keys\") pod \"keystone-bootstrap-sb55x\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " pod="openstack/keystone-bootstrap-sb55x" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.907808 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxgkm\" (UniqueName: \"kubernetes.io/projected/df7139f0-f8b5-41bc-95f3-6839dcb3231b-kube-api-access-bxgkm\") pod \"dnsmasq-dns-bbf5cc879-4zwjp\" (UID: 
\"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.907827 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-ovsdbserver-nb\") pod \"dnsmasq-dns-bbf5cc879-4zwjp\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.907849 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-dns-svc\") pod \"dnsmasq-dns-bbf5cc879-4zwjp\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.907914 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-config-data\") pod \"keystone-bootstrap-sb55x\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " pod="openstack/keystone-bootstrap-sb55x" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.907963 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-combined-ca-bundle\") pod \"keystone-bootstrap-sb55x\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " pod="openstack/keystone-bootstrap-sb55x" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.907997 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-config\") pod \"dnsmasq-dns-bbf5cc879-4zwjp\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.908062 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsg6s\" (UniqueName: \"kubernetes.io/projected/95185182-622d-4bad-866e-80054cb0780a-kube-api-access-rsg6s\") pod \"keystone-bootstrap-sb55x\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " pod="openstack/keystone-bootstrap-sb55x" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.930328 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5788b95877-j7l5l"] Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.933188 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5788b95877-j7l5l" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.941578 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.941892 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.942041 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.942718 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-9bx4m" Nov 25 09:22:37 crc kubenswrapper[4687]: I1125 09:22:37.967261 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5788b95877-j7l5l"] Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.009850 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wd4sq\" (UniqueName: \"kubernetes.io/projected/41b80a26-d97f-4344-8489-cfa0dbbaf99f-kube-api-access-wd4sq\") pod \"horizon-5788b95877-j7l5l\" (UID: \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\") " pod="openstack/horizon-5788b95877-j7l5l" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.009891 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/41b80a26-d97f-4344-8489-cfa0dbbaf99f-scripts\") pod \"horizon-5788b95877-j7l5l\" (UID: \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\") " pod="openstack/horizon-5788b95877-j7l5l" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.009921 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-credential-keys\") pod \"keystone-bootstrap-sb55x\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " pod="openstack/keystone-bootstrap-sb55x" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.009938 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/41b80a26-d97f-4344-8489-cfa0dbbaf99f-horizon-secret-key\") pod \"horizon-5788b95877-j7l5l\" (UID: \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\") " pod="openstack/horizon-5788b95877-j7l5l" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.009959 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-ovsdbserver-sb\") pod \"dnsmasq-dns-bbf5cc879-4zwjp\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.009975 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-scripts\") pod \"keystone-bootstrap-sb55x\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " pod="openstack/keystone-bootstrap-sb55x" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.010002 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-dns-swift-storage-0\") pod \"dnsmasq-dns-bbf5cc879-4zwjp\" (UID: 
\"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.010030 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-fernet-keys\") pod \"keystone-bootstrap-sb55x\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " pod="openstack/keystone-bootstrap-sb55x" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.010059 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxgkm\" (UniqueName: \"kubernetes.io/projected/df7139f0-f8b5-41bc-95f3-6839dcb3231b-kube-api-access-bxgkm\") pod \"dnsmasq-dns-bbf5cc879-4zwjp\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.010075 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-dns-svc\") pod \"dnsmasq-dns-bbf5cc879-4zwjp\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.010089 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-ovsdbserver-nb\") pod \"dnsmasq-dns-bbf5cc879-4zwjp\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.010113 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-config-data\") pod \"keystone-bootstrap-sb55x\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " pod="openstack/keystone-bootstrap-sb55x" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.010131 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41b80a26-d97f-4344-8489-cfa0dbbaf99f-logs\") pod \"horizon-5788b95877-j7l5l\" (UID: \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\") " pod="openstack/horizon-5788b95877-j7l5l" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.010152 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/41b80a26-d97f-4344-8489-cfa0dbbaf99f-config-data\") pod \"horizon-5788b95877-j7l5l\" (UID: \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\") " pod="openstack/horizon-5788b95877-j7l5l" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.010175 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-combined-ca-bundle\") pod \"keystone-bootstrap-sb55x\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " pod="openstack/keystone-bootstrap-sb55x" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.010196 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-config\") pod \"dnsmasq-dns-bbf5cc879-4zwjp\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:38 crc 
kubenswrapper[4687]: I1125 09:22:38.010227 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsg6s\" (UniqueName: \"kubernetes.io/projected/95185182-622d-4bad-866e-80054cb0780a-kube-api-access-rsg6s\") pod \"keystone-bootstrap-sb55x\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " pod="openstack/keystone-bootstrap-sb55x" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.012588 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-dns-svc\") pod \"dnsmasq-dns-bbf5cc879-4zwjp\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.013188 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-dns-swift-storage-0\") pod \"dnsmasq-dns-bbf5cc879-4zwjp\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.013950 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-ovsdbserver-sb\") pod \"dnsmasq-dns-bbf5cc879-4zwjp\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.014949 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-ovsdbserver-nb\") pod \"dnsmasq-dns-bbf5cc879-4zwjp\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.017458 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-2h86j"] Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.018780 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-2h86j" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.020477 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-config\") pod \"dnsmasq-dns-bbf5cc879-4zwjp\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.026903 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-scripts\") pod \"keystone-bootstrap-sb55x\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " pod="openstack/keystone-bootstrap-sb55x" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.027142 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-credential-keys\") pod \"keystone-bootstrap-sb55x\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " pod="openstack/keystone-bootstrap-sb55x" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.028455 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-combined-ca-bundle\") pod \"keystone-bootstrap-sb55x\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " pod="openstack/keystone-bootstrap-sb55x" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.033064 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.033329 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-nwxld" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.035449 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-config-data\") pod \"keystone-bootstrap-sb55x\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " pod="openstack/keystone-bootstrap-sb55x" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.037393 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-fernet-keys\") pod \"keystone-bootstrap-sb55x\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " pod="openstack/keystone-bootstrap-sb55x" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.042000 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.056495 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-2h86j"] Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.058419 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxgkm\" (UniqueName: \"kubernetes.io/projected/df7139f0-f8b5-41bc-95f3-6839dcb3231b-kube-api-access-bxgkm\") pod \"dnsmasq-dns-bbf5cc879-4zwjp\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.074545 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsg6s\" (UniqueName: 
\"kubernetes.io/projected/95185182-622d-4bad-866e-80054cb0780a-kube-api-access-rsg6s\") pod \"keystone-bootstrap-sb55x\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " pod="openstack/keystone-bootstrap-sb55x" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.085435 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-765t4"] Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.086532 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-765t4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.089313 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.089610 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.089844 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-2rdqk" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.104829 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.108460 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-765t4"] Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.111574 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-combined-ca-bundle\") pod \"cinder-db-sync-2h86j\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " pod="openstack/cinder-db-sync-2h86j" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.111622 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41b80a26-d97f-4344-8489-cfa0dbbaf99f-logs\") pod \"horizon-5788b95877-j7l5l\" (UID: \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\") " pod="openstack/horizon-5788b95877-j7l5l" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.111647 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-config-data\") pod \"cinder-db-sync-2h86j\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " pod="openstack/cinder-db-sync-2h86j" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.111666 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/41b80a26-d97f-4344-8489-cfa0dbbaf99f-config-data\") pod \"horizon-5788b95877-j7l5l\" (UID: \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\") " pod="openstack/horizon-5788b95877-j7l5l" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.111740 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wd4sq\" (UniqueName: \"kubernetes.io/projected/41b80a26-d97f-4344-8489-cfa0dbbaf99f-kube-api-access-wd4sq\") pod \"horizon-5788b95877-j7l5l\" (UID: \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\") " pod="openstack/horizon-5788b95877-j7l5l" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.111772 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/41b80a26-d97f-4344-8489-cfa0dbbaf99f-scripts\") pod 
\"horizon-5788b95877-j7l5l\" (UID: \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\") " pod="openstack/horizon-5788b95877-j7l5l" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.111811 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/53681654-97b6-4586-ba53-8b6b018e04fa-etc-machine-id\") pod \"cinder-db-sync-2h86j\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " pod="openstack/cinder-db-sync-2h86j" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.111837 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/41b80a26-d97f-4344-8489-cfa0dbbaf99f-horizon-secret-key\") pod \"horizon-5788b95877-j7l5l\" (UID: \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\") " pod="openstack/horizon-5788b95877-j7l5l" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.111866 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzvlh\" (UniqueName: \"kubernetes.io/projected/53681654-97b6-4586-ba53-8b6b018e04fa-kube-api-access-pzvlh\") pod \"cinder-db-sync-2h86j\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " pod="openstack/cinder-db-sync-2h86j" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.111895 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-scripts\") pod \"cinder-db-sync-2h86j\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " pod="openstack/cinder-db-sync-2h86j" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.111938 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-db-sync-config-data\") pod \"cinder-db-sync-2h86j\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " pod="openstack/cinder-db-sync-2h86j" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.112416 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41b80a26-d97f-4344-8489-cfa0dbbaf99f-logs\") pod \"horizon-5788b95877-j7l5l\" (UID: \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\") " pod="openstack/horizon-5788b95877-j7l5l" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.113590 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/41b80a26-d97f-4344-8489-cfa0dbbaf99f-config-data\") pod \"horizon-5788b95877-j7l5l\" (UID: \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\") " pod="openstack/horizon-5788b95877-j7l5l" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.114141 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/41b80a26-d97f-4344-8489-cfa0dbbaf99f-scripts\") pod \"horizon-5788b95877-j7l5l\" (UID: \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\") " pod="openstack/horizon-5788b95877-j7l5l" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.130313 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/41b80a26-d97f-4344-8489-cfa0dbbaf99f-horizon-secret-key\") pod \"horizon-5788b95877-j7l5l\" (UID: \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\") " pod="openstack/horizon-5788b95877-j7l5l" Nov 25 09:22:38 crc 
Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.160318 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wd4sq\" (UniqueName: \"kubernetes.io/projected/41b80a26-d97f-4344-8489-cfa0dbbaf99f-kube-api-access-wd4sq\") pod \"horizon-5788b95877-j7l5l\" (UID: \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\") " pod="openstack/horizon-5788b95877-j7l5l"
Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.194740 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-86977dc76f-6cpw4"]
Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.211042 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-86977dc76f-6cpw4"
Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.213672 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-combined-ca-bundle\") pod \"cinder-db-sync-2h86j\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " pod="openstack/cinder-db-sync-2h86j"
Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.213706 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-config-data\") pod \"cinder-db-sync-2h86j\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " pod="openstack/cinder-db-sync-2h86j"
Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.213755 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlffs\" (UniqueName: \"kubernetes.io/projected/7bb5f689-fd43-4fa3-b5a9-6603155ff184-kube-api-access-nlffs\") pod \"neutron-db-sync-765t4\" (UID: \"7bb5f689-fd43-4fa3-b5a9-6603155ff184\") " pod="openstack/neutron-db-sync-765t4"
Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.213798 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/53681654-97b6-4586-ba53-8b6b018e04fa-etc-machine-id\") pod \"cinder-db-sync-2h86j\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " pod="openstack/cinder-db-sync-2h86j"
Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.213821 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzvlh\" (UniqueName: \"kubernetes.io/projected/53681654-97b6-4586-ba53-8b6b018e04fa-kube-api-access-pzvlh\") pod \"cinder-db-sync-2h86j\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " pod="openstack/cinder-db-sync-2h86j"
Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.213835 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-scripts\") pod \"cinder-db-sync-2h86j\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " pod="openstack/cinder-db-sync-2h86j"
Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.213868 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-db-sync-config-data\") pod \"cinder-db-sync-2h86j\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " pod="openstack/cinder-db-sync-2h86j"
Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.213892 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bb5f689-fd43-4fa3-b5a9-6603155ff184-combined-ca-bundle\") pod \"neutron-db-sync-765t4\" (UID: \"7bb5f689-fd43-4fa3-b5a9-6603155ff184\") " pod="openstack/neutron-db-sync-765t4"
Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.213919 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7bb5f689-fd43-4fa3-b5a9-6603155ff184-config\") pod \"neutron-db-sync-765t4\" (UID: \"7bb5f689-fd43-4fa3-b5a9-6603155ff184\") " pod="openstack/neutron-db-sync-765t4"
Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.216742 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/53681654-97b6-4586-ba53-8b6b018e04fa-etc-machine-id\") pod \"cinder-db-sync-2h86j\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " pod="openstack/cinder-db-sync-2h86j"
Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.223331 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-scripts\") pod \"cinder-db-sync-2h86j\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " pod="openstack/cinder-db-sync-2h86j"
Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.232457 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-config-data\") pod \"cinder-db-sync-2h86j\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " pod="openstack/cinder-db-sync-2h86j"
Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.232537 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-86977dc76f-6cpw4"]
Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.240238 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-db-sync-config-data\") pod \"cinder-db-sync-2h86j\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " pod="openstack/cinder-db-sync-2h86j"
Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.247306 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-combined-ca-bundle\") pod \"cinder-db-sync-2h86j\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " pod="openstack/cinder-db-sync-2h86j"
Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.273810 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzvlh\" (UniqueName: \"kubernetes.io/projected/53681654-97b6-4586-ba53-8b6b018e04fa-kube-api-access-pzvlh\") pod \"cinder-db-sync-2h86j\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " pod="openstack/cinder-db-sync-2h86j"
Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.275643 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5788b95877-j7l5l"
Need to start a new one" pod="openstack/horizon-5788b95877-j7l5l" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.285992 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-4zwjp"] Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.310683 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-2h86j" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.314297 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.315283 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d875a4e8-0091-41cb-9a53-2cd74bdf853c-config-data\") pod \"horizon-86977dc76f-6cpw4\" (UID: \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\") " pod="openstack/horizon-86977dc76f-6cpw4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.315320 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bb5f689-fd43-4fa3-b5a9-6603155ff184-combined-ca-bundle\") pod \"neutron-db-sync-765t4\" (UID: \"7bb5f689-fd43-4fa3-b5a9-6603155ff184\") " pod="openstack/neutron-db-sync-765t4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.315353 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7bb5f689-fd43-4fa3-b5a9-6603155ff184-config\") pod \"neutron-db-sync-765t4\" (UID: \"7bb5f689-fd43-4fa3-b5a9-6603155ff184\") " pod="openstack/neutron-db-sync-765t4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.315374 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d875a4e8-0091-41cb-9a53-2cd74bdf853c-horizon-secret-key\") pod \"horizon-86977dc76f-6cpw4\" (UID: \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\") " pod="openstack/horizon-86977dc76f-6cpw4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.315422 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlffs\" (UniqueName: \"kubernetes.io/projected/7bb5f689-fd43-4fa3-b5a9-6603155ff184-kube-api-access-nlffs\") pod \"neutron-db-sync-765t4\" (UID: \"7bb5f689-fd43-4fa3-b5a9-6603155ff184\") " pod="openstack/neutron-db-sync-765t4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.315455 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d875a4e8-0091-41cb-9a53-2cd74bdf853c-scripts\") pod \"horizon-86977dc76f-6cpw4\" (UID: \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\") " pod="openstack/horizon-86977dc76f-6cpw4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.315472 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8nzz\" (UniqueName: \"kubernetes.io/projected/d875a4e8-0091-41cb-9a53-2cd74bdf853c-kube-api-access-g8nzz\") pod \"horizon-86977dc76f-6cpw4\" (UID: \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\") " pod="openstack/horizon-86977dc76f-6cpw4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.315536 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d875a4e8-0091-41cb-9a53-2cd74bdf853c-logs\") pod 
\"horizon-86977dc76f-6cpw4\" (UID: \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\") " pod="openstack/horizon-86977dc76f-6cpw4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.316192 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.319383 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.319602 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.329139 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/7bb5f689-fd43-4fa3-b5a9-6603155ff184-config\") pod \"neutron-db-sync-765t4\" (UID: \"7bb5f689-fd43-4fa3-b5a9-6603155ff184\") " pod="openstack/neutron-db-sync-765t4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.329199 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.340184 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bb5f689-fd43-4fa3-b5a9-6603155ff184-combined-ca-bundle\") pod \"neutron-db-sync-765t4\" (UID: \"7bb5f689-fd43-4fa3-b5a9-6603155ff184\") " pod="openstack/neutron-db-sync-765t4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.348564 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-gpc68"] Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.349593 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-gpc68" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.377083 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.377629 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.381066 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-htp9p" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.418103 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mm6kk\" (UniqueName: \"kubernetes.io/projected/7c2bb808-f45c-4126-94b9-36187402c9d7-kube-api-access-mm6kk\") pod \"placement-db-sync-gpc68\" (UID: \"7c2bb808-f45c-4126-94b9-36187402c9d7\") " pod="openstack/placement-db-sync-gpc68" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.418185 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09662da0-b802-43c3-9c8e-4c9e951bdd7f-log-httpd\") pod \"ceilometer-0\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.418303 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-scripts\") pod \"ceilometer-0\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.418340 4687 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d875a4e8-0091-41cb-9a53-2cd74bdf853c-logs\") pod \"horizon-86977dc76f-6cpw4\" (UID: \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\") " pod="openstack/horizon-86977dc76f-6cpw4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.418394 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.418416 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d875a4e8-0091-41cb-9a53-2cd74bdf853c-config-data\") pod \"horizon-86977dc76f-6cpw4\" (UID: \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\") " pod="openstack/horizon-86977dc76f-6cpw4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.418510 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d875a4e8-0091-41cb-9a53-2cd74bdf853c-horizon-secret-key\") pod \"horizon-86977dc76f-6cpw4\" (UID: \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\") " pod="openstack/horizon-86977dc76f-6cpw4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.418552 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fgh5\" (UniqueName: \"kubernetes.io/projected/09662da0-b802-43c3-9c8e-4c9e951bdd7f-kube-api-access-5fgh5\") pod \"ceilometer-0\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.418586 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c2bb808-f45c-4126-94b9-36187402c9d7-combined-ca-bundle\") pod \"placement-db-sync-gpc68\" (UID: \"7c2bb808-f45c-4126-94b9-36187402c9d7\") " pod="openstack/placement-db-sync-gpc68" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.418607 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-config-data\") pod \"ceilometer-0\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.418642 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.418677 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7c2bb808-f45c-4126-94b9-36187402c9d7-logs\") pod \"placement-db-sync-gpc68\" (UID: \"7c2bb808-f45c-4126-94b9-36187402c9d7\") " pod="openstack/placement-db-sync-gpc68" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.418696 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/7c2bb808-f45c-4126-94b9-36187402c9d7-scripts\") pod \"placement-db-sync-gpc68\" (UID: \"7c2bb808-f45c-4126-94b9-36187402c9d7\") " pod="openstack/placement-db-sync-gpc68" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.418718 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c2bb808-f45c-4126-94b9-36187402c9d7-config-data\") pod \"placement-db-sync-gpc68\" (UID: \"7c2bb808-f45c-4126-94b9-36187402c9d7\") " pod="openstack/placement-db-sync-gpc68" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.418746 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09662da0-b802-43c3-9c8e-4c9e951bdd7f-run-httpd\") pod \"ceilometer-0\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.418770 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d875a4e8-0091-41cb-9a53-2cd74bdf853c-scripts\") pod \"horizon-86977dc76f-6cpw4\" (UID: \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\") " pod="openstack/horizon-86977dc76f-6cpw4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.418789 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8nzz\" (UniqueName: \"kubernetes.io/projected/d875a4e8-0091-41cb-9a53-2cd74bdf853c-kube-api-access-g8nzz\") pod \"horizon-86977dc76f-6cpw4\" (UID: \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\") " pod="openstack/horizon-86977dc76f-6cpw4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.420396 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d875a4e8-0091-41cb-9a53-2cd74bdf853c-logs\") pod \"horizon-86977dc76f-6cpw4\" (UID: \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\") " pod="openstack/horizon-86977dc76f-6cpw4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.423472 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d875a4e8-0091-41cb-9a53-2cd74bdf853c-config-data\") pod \"horizon-86977dc76f-6cpw4\" (UID: \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\") " pod="openstack/horizon-86977dc76f-6cpw4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.425708 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d875a4e8-0091-41cb-9a53-2cd74bdf853c-scripts\") pod \"horizon-86977dc76f-6cpw4\" (UID: \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\") " pod="openstack/horizon-86977dc76f-6cpw4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.437798 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlffs\" (UniqueName: \"kubernetes.io/projected/7bb5f689-fd43-4fa3-b5a9-6603155ff184-kube-api-access-nlffs\") pod \"neutron-db-sync-765t4\" (UID: \"7bb5f689-fd43-4fa3-b5a9-6603155ff184\") " pod="openstack/neutron-db-sync-765t4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.445405 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-gpc68"] Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.470745 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: 
\"kubernetes.io/secret/d875a4e8-0091-41cb-9a53-2cd74bdf853c-horizon-secret-key\") pod \"horizon-86977dc76f-6cpw4\" (UID: \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\") " pod="openstack/horizon-86977dc76f-6cpw4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.472210 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-v7qrn"] Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.475252 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.476408 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8nzz\" (UniqueName: \"kubernetes.io/projected/d875a4e8-0091-41cb-9a53-2cd74bdf853c-kube-api-access-g8nzz\") pod \"horizon-86977dc76f-6cpw4\" (UID: \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\") " pod="openstack/horizon-86977dc76f-6cpw4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.520031 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-v7qrn\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.520099 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5fgh5\" (UniqueName: \"kubernetes.io/projected/09662da0-b802-43c3-9c8e-4c9e951bdd7f-kube-api-access-5fgh5\") pod \"ceilometer-0\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.520121 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-config\") pod \"dnsmasq-dns-56df8fb6b7-v7qrn\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.520146 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c2bb808-f45c-4126-94b9-36187402c9d7-combined-ca-bundle\") pod \"placement-db-sync-gpc68\" (UID: \"7c2bb808-f45c-4126-94b9-36187402c9d7\") " pod="openstack/placement-db-sync-gpc68" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.520163 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-config-data\") pod \"ceilometer-0\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.520194 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.520230 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7c2bb808-f45c-4126-94b9-36187402c9d7-logs\") pod \"placement-db-sync-gpc68\" (UID: \"7c2bb808-f45c-4126-94b9-36187402c9d7\") " 
pod="openstack/placement-db-sync-gpc68" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.520247 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c2bb808-f45c-4126-94b9-36187402c9d7-scripts\") pod \"placement-db-sync-gpc68\" (UID: \"7c2bb808-f45c-4126-94b9-36187402c9d7\") " pod="openstack/placement-db-sync-gpc68" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.520261 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c2bb808-f45c-4126-94b9-36187402c9d7-config-data\") pod \"placement-db-sync-gpc68\" (UID: \"7c2bb808-f45c-4126-94b9-36187402c9d7\") " pod="openstack/placement-db-sync-gpc68" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.520283 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09662da0-b802-43c3-9c8e-4c9e951bdd7f-run-httpd\") pod \"ceilometer-0\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.520330 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mm6kk\" (UniqueName: \"kubernetes.io/projected/7c2bb808-f45c-4126-94b9-36187402c9d7-kube-api-access-mm6kk\") pod \"placement-db-sync-gpc68\" (UID: \"7c2bb808-f45c-4126-94b9-36187402c9d7\") " pod="openstack/placement-db-sync-gpc68" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.520347 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-v7qrn\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.520370 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09662da0-b802-43c3-9c8e-4c9e951bdd7f-log-httpd\") pod \"ceilometer-0\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.520397 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-scripts\") pod \"ceilometer-0\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.520414 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-v7qrn\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.520433 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.520459 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-hqvzs\" (UniqueName: \"kubernetes.io/projected/080ab56b-8da0-4b11-9595-6766031cfb41-kube-api-access-hqvzs\") pod \"dnsmasq-dns-56df8fb6b7-v7qrn\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.520486 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-v7qrn\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.522119 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-v7qrn"] Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.526568 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09662da0-b802-43c3-9c8e-4c9e951bdd7f-run-httpd\") pod \"ceilometer-0\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.529210 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7c2bb808-f45c-4126-94b9-36187402c9d7-logs\") pod \"placement-db-sync-gpc68\" (UID: \"7c2bb808-f45c-4126-94b9-36187402c9d7\") " pod="openstack/placement-db-sync-gpc68" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.529655 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09662da0-b802-43c3-9c8e-4c9e951bdd7f-log-httpd\") pod \"ceilometer-0\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.532017 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-8b7k2"] Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.533784 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-8b7k2" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.537141 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.537313 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-gw9r5" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.543053 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-config-data\") pod \"ceilometer-0\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.554563 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c2bb808-f45c-4126-94b9-36187402c9d7-combined-ca-bundle\") pod \"placement-db-sync-gpc68\" (UID: \"7c2bb808-f45c-4126-94b9-36187402c9d7\") " pod="openstack/placement-db-sync-gpc68" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.554904 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-scripts\") pod \"ceilometer-0\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.560909 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c2bb808-f45c-4126-94b9-36187402c9d7-scripts\") pod \"placement-db-sync-gpc68\" (UID: \"7c2bb808-f45c-4126-94b9-36187402c9d7\") " pod="openstack/placement-db-sync-gpc68" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.562195 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.562321 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c2bb808-f45c-4126-94b9-36187402c9d7-config-data\") pod \"placement-db-sync-gpc68\" (UID: \"7c2bb808-f45c-4126-94b9-36187402c9d7\") " pod="openstack/placement-db-sync-gpc68" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.564569 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.570337 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-8b7k2"] Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.571127 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mm6kk\" (UniqueName: \"kubernetes.io/projected/7c2bb808-f45c-4126-94b9-36187402c9d7-kube-api-access-mm6kk\") pod \"placement-db-sync-gpc68\" (UID: \"7c2bb808-f45c-4126-94b9-36187402c9d7\") " pod="openstack/placement-db-sync-gpc68" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.571864 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-5fgh5\" (UniqueName: \"kubernetes.io/projected/09662da0-b802-43c3-9c8e-4c9e951bdd7f-kube-api-access-5fgh5\") pod \"ceilometer-0\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.608258 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.610106 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.622869 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.623072 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.623189 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-rggzb" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.623302 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.629397 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-v7qrn\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.629488 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-v7qrn\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.629536 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hqvzs\" (UniqueName: \"kubernetes.io/projected/080ab56b-8da0-4b11-9595-6766031cfb41-kube-api-access-hqvzs\") pod \"dnsmasq-dns-56df8fb6b7-v7qrn\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.629567 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-v7qrn\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.629609 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-v7qrn\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.629693 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-config\") pod \"dnsmasq-dns-56df8fb6b7-v7qrn\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " 
pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.630735 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-config\") pod \"dnsmasq-dns-56df8fb6b7-v7qrn\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.631428 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-v7qrn\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.632304 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-v7qrn\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.632588 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-v7qrn\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.633081 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-v7qrn\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.647447 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.651824 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-765t4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.659916 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqvzs\" (UniqueName: \"kubernetes.io/projected/080ab56b-8da0-4b11-9595-6766031cfb41-kube-api-access-hqvzs\") pod \"dnsmasq-dns-56df8fb6b7-v7qrn\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.686830 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.688711 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.692540 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.692776 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.702716 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.728287 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.731063 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxnx5\" (UniqueName: \"kubernetes.io/projected/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-kube-api-access-wxnx5\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.731146 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.731179 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxt6v\" (UniqueName: \"kubernetes.io/projected/5d974fcb-fbad-4f24-9857-a791205029a0-kube-api-access-gxt6v\") pod \"barbican-db-sync-8b7k2\" (UID: \"5d974fcb-fbad-4f24-9857-a791205029a0\") " pod="openstack/barbican-db-sync-8b7k2" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.731199 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-scripts\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.731226 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.731264 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d974fcb-fbad-4f24-9857-a791205029a0-combined-ca-bundle\") pod \"barbican-db-sync-8b7k2\" (UID: \"5d974fcb-fbad-4f24-9857-a791205029a0\") " pod="openstack/barbican-db-sync-8b7k2" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.731298 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5d974fcb-fbad-4f24-9857-a791205029a0-db-sync-config-data\") pod \"barbican-db-sync-8b7k2\" (UID: 
\"5d974fcb-fbad-4f24-9857-a791205029a0\") " pod="openstack/barbican-db-sync-8b7k2" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.731320 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.731411 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.731433 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-config-data\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.731463 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-logs\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.756998 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-gpc68" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.760438 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-86977dc76f-6cpw4" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.770821 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-4zwjp"] Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.833482 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.833544 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxnx5\" (UniqueName: \"kubernetes.io/projected/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-kube-api-access-wxnx5\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.833579 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.833616 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.833668 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.833710 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hmnl\" (UniqueName: \"kubernetes.io/projected/8ad986b1-c02f-4462-9330-0acba7d7001f-kube-api-access-9hmnl\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.833733 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxt6v\" (UniqueName: \"kubernetes.io/projected/5d974fcb-fbad-4f24-9857-a791205029a0-kube-api-access-gxt6v\") pod \"barbican-db-sync-8b7k2\" (UID: \"5d974fcb-fbad-4f24-9857-a791205029a0\") " pod="openstack/barbican-db-sync-8b7k2" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.833751 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-scripts\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.833768 4687 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ad986b1-c02f-4462-9330-0acba7d7001f-logs\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.833786 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.833808 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d974fcb-fbad-4f24-9857-a791205029a0-combined-ca-bundle\") pod \"barbican-db-sync-8b7k2\" (UID: \"5d974fcb-fbad-4f24-9857-a791205029a0\") " pod="openstack/barbican-db-sync-8b7k2" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.833874 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5d974fcb-fbad-4f24-9857-a791205029a0-db-sync-config-data\") pod \"barbican-db-sync-8b7k2\" (UID: \"5d974fcb-fbad-4f24-9857-a791205029a0\") " pod="openstack/barbican-db-sync-8b7k2" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.833895 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.833923 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.833951 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8ad986b1-c02f-4462-9330-0acba7d7001f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.833971 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.833989 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.834008 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-config-data\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.834045 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-logs\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.834994 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.836079 4687 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.839805 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-logs\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.843377 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-scripts\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.844667 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5d974fcb-fbad-4f24-9857-a791205029a0-db-sync-config-data\") pod \"barbican-db-sync-8b7k2\" (UID: \"5d974fcb-fbad-4f24-9857-a791205029a0\") " pod="openstack/barbican-db-sync-8b7k2" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.847202 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-config-data\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.853036 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.857022 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d974fcb-fbad-4f24-9857-a791205029a0-combined-ca-bundle\") pod \"barbican-db-sync-8b7k2\" (UID: 
\"5d974fcb-fbad-4f24-9857-a791205029a0\") " pod="openstack/barbican-db-sync-8b7k2" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.857858 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.858473 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxnx5\" (UniqueName: \"kubernetes.io/projected/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-kube-api-access-wxnx5\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.861087 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxt6v\" (UniqueName: \"kubernetes.io/projected/5d974fcb-fbad-4f24-9857-a791205029a0-kube-api-access-gxt6v\") pod \"barbican-db-sync-8b7k2\" (UID: \"5d974fcb-fbad-4f24-9857-a791205029a0\") " pod="openstack/barbican-db-sync-8b7k2" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.892907 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.904621 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-8b7k2" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.923278 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.935767 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.935820 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8ad986b1-c02f-4462-9330-0acba7d7001f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.935843 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.935908 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 
09:22:38.935930 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.935956 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.935988 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hmnl\" (UniqueName: \"kubernetes.io/projected/8ad986b1-c02f-4462-9330-0acba7d7001f-kube-api-access-9hmnl\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.936015 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ad986b1-c02f-4462-9330-0acba7d7001f-logs\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.936552 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ad986b1-c02f-4462-9330-0acba7d7001f-logs\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.938233 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8ad986b1-c02f-4462-9330-0acba7d7001f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.941002 4687 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.944325 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.945855 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.951297 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"scripts\" (UniqueName: \"kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.952241 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.984363 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.986527 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9hmnl\" (UniqueName: \"kubernetes.io/projected/8ad986b1-c02f-4462-9330-0acba7d7001f-kube-api-access-9hmnl\") pod \"glance-default-internal-api-0\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:38 crc kubenswrapper[4687]: I1125 09:22:38.986728 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:22:39 crc kubenswrapper[4687]: I1125 09:22:39.021530 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:22:39 crc kubenswrapper[4687]: I1125 09:22:39.074362 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-2h86j"] Nov 25 09:22:39 crc kubenswrapper[4687]: I1125 09:22:39.118936 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5788b95877-j7l5l"] Nov 25 09:22:39 crc kubenswrapper[4687]: I1125 09:22:39.127397 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-sb55x"] Nov 25 09:22:39 crc kubenswrapper[4687]: I1125 09:22:39.419845 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-765t4"] Nov 25 09:22:39 crc kubenswrapper[4687]: I1125 09:22:39.431988 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:22:39 crc kubenswrapper[4687]: I1125 09:22:39.491883 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-86977dc76f-6cpw4"] Nov 25 09:22:39 crc kubenswrapper[4687]: W1125 09:22:39.493753 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd875a4e8_0091_41cb_9a53_2cd74bdf853c.slice/crio-73d9eeb28e45503f54a315a59e1ace2d9db6bf255d1625e97d2497b2e00220b3 WatchSource:0}: Error finding container 73d9eeb28e45503f54a315a59e1ace2d9db6bf255d1625e97d2497b2e00220b3: Status 404 returned error can't find the container with id 73d9eeb28e45503f54a315a59e1ace2d9db6bf255d1625e97d2497b2e00220b3 Nov 25 09:22:39 crc kubenswrapper[4687]: I1125 09:22:39.517100 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-gpc68"] Nov 25 09:22:39 crc kubenswrapper[4687]: I1125 09:22:39.558952 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-2h86j" 
event={"ID":"53681654-97b6-4586-ba53-8b6b018e04fa","Type":"ContainerStarted","Data":"00cfb1c8362ee83370ad40a2f2ce4e06c53c63fc208c869c3150dd469eeec6c5"} Nov 25 09:22:39 crc kubenswrapper[4687]: I1125 09:22:39.560585 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5788b95877-j7l5l" event={"ID":"41b80a26-d97f-4344-8489-cfa0dbbaf99f","Type":"ContainerStarted","Data":"df28394b0e78e92fe9f4c377eb084c04147c19e0991a1ba4fd0bf42b74460586"} Nov 25 09:22:39 crc kubenswrapper[4687]: I1125 09:22:39.566306 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09662da0-b802-43c3-9c8e-4c9e951bdd7f","Type":"ContainerStarted","Data":"24dec1fee7ca90a481627e52c3764aad58ac8fdf62a3dead9cdbd91e5ac09180"} Nov 25 09:22:39 crc kubenswrapper[4687]: I1125 09:22:39.567714 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-sb55x" event={"ID":"95185182-622d-4bad-866e-80054cb0780a","Type":"ContainerStarted","Data":"9392771a48fa67855903dd907e455ff3344e481a56e580525460b47385dc998e"} Nov 25 09:22:39 crc kubenswrapper[4687]: I1125 09:22:39.569180 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-gpc68" event={"ID":"7c2bb808-f45c-4126-94b9-36187402c9d7","Type":"ContainerStarted","Data":"548b3a99d23f3a3c278f9a75dd4289ae55a0e9cdca89666b2ffd6c7cd199df20"} Nov 25 09:22:39 crc kubenswrapper[4687]: I1125 09:22:39.570311 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-765t4" event={"ID":"7bb5f689-fd43-4fa3-b5a9-6603155ff184","Type":"ContainerStarted","Data":"838c083fd028fd80789902ec20b3b738f2944c954f5cfbc708fd984ea08a866c"} Nov 25 09:22:39 crc kubenswrapper[4687]: I1125 09:22:39.571374 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-86977dc76f-6cpw4" event={"ID":"d875a4e8-0091-41cb-9a53-2cd74bdf853c","Type":"ContainerStarted","Data":"73d9eeb28e45503f54a315a59e1ace2d9db6bf255d1625e97d2497b2e00220b3"} Nov 25 09:22:39 crc kubenswrapper[4687]: I1125 09:22:39.572845 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" event={"ID":"df7139f0-f8b5-41bc-95f3-6839dcb3231b","Type":"ContainerStarted","Data":"042708d7216313ece59313129978cdc0e01f3ca43420bb39c7605650c5767f82"} Nov 25 09:22:39 crc kubenswrapper[4687]: I1125 09:22:39.612395 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-v7qrn"] Nov 25 09:22:39 crc kubenswrapper[4687]: I1125 09:22:39.808208 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-8b7k2"] Nov 25 09:22:39 crc kubenswrapper[4687]: I1125 09:22:39.840059 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.080964 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.115529 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.134983 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5788b95877-j7l5l"] Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.153432 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.165742 4687 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/horizon-5b5c8fb8f9-zv6j6"] Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.167203 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5b5c8fb8f9-zv6j6" Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.193907 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5b5c8fb8f9-zv6j6"] Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.274168 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5df3d575-c461-43d1-af22-d9f15aaf06b7-config-data\") pod \"horizon-5b5c8fb8f9-zv6j6\" (UID: \"5df3d575-c461-43d1-af22-d9f15aaf06b7\") " pod="openstack/horizon-5b5c8fb8f9-zv6j6" Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.274215 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nx9lg\" (UniqueName: \"kubernetes.io/projected/5df3d575-c461-43d1-af22-d9f15aaf06b7-kube-api-access-nx9lg\") pod \"horizon-5b5c8fb8f9-zv6j6\" (UID: \"5df3d575-c461-43d1-af22-d9f15aaf06b7\") " pod="openstack/horizon-5b5c8fb8f9-zv6j6" Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.274249 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/5df3d575-c461-43d1-af22-d9f15aaf06b7-horizon-secret-key\") pod \"horizon-5b5c8fb8f9-zv6j6\" (UID: \"5df3d575-c461-43d1-af22-d9f15aaf06b7\") " pod="openstack/horizon-5b5c8fb8f9-zv6j6" Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.274392 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5df3d575-c461-43d1-af22-d9f15aaf06b7-logs\") pod \"horizon-5b5c8fb8f9-zv6j6\" (UID: \"5df3d575-c461-43d1-af22-d9f15aaf06b7\") " pod="openstack/horizon-5b5c8fb8f9-zv6j6" Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.274433 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5df3d575-c461-43d1-af22-d9f15aaf06b7-scripts\") pod \"horizon-5b5c8fb8f9-zv6j6\" (UID: \"5df3d575-c461-43d1-af22-d9f15aaf06b7\") " pod="openstack/horizon-5b5c8fb8f9-zv6j6" Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.375677 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5df3d575-c461-43d1-af22-d9f15aaf06b7-logs\") pod \"horizon-5b5c8fb8f9-zv6j6\" (UID: \"5df3d575-c461-43d1-af22-d9f15aaf06b7\") " pod="openstack/horizon-5b5c8fb8f9-zv6j6" Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.375730 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5df3d575-c461-43d1-af22-d9f15aaf06b7-scripts\") pod \"horizon-5b5c8fb8f9-zv6j6\" (UID: \"5df3d575-c461-43d1-af22-d9f15aaf06b7\") " pod="openstack/horizon-5b5c8fb8f9-zv6j6" Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.375790 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5df3d575-c461-43d1-af22-d9f15aaf06b7-config-data\") pod \"horizon-5b5c8fb8f9-zv6j6\" (UID: \"5df3d575-c461-43d1-af22-d9f15aaf06b7\") " pod="openstack/horizon-5b5c8fb8f9-zv6j6" Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.375811 4687 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nx9lg\" (UniqueName: \"kubernetes.io/projected/5df3d575-c461-43d1-af22-d9f15aaf06b7-kube-api-access-nx9lg\") pod \"horizon-5b5c8fb8f9-zv6j6\" (UID: \"5df3d575-c461-43d1-af22-d9f15aaf06b7\") " pod="openstack/horizon-5b5c8fb8f9-zv6j6" Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.375841 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/5df3d575-c461-43d1-af22-d9f15aaf06b7-horizon-secret-key\") pod \"horizon-5b5c8fb8f9-zv6j6\" (UID: \"5df3d575-c461-43d1-af22-d9f15aaf06b7\") " pod="openstack/horizon-5b5c8fb8f9-zv6j6" Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.377189 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5df3d575-c461-43d1-af22-d9f15aaf06b7-scripts\") pod \"horizon-5b5c8fb8f9-zv6j6\" (UID: \"5df3d575-c461-43d1-af22-d9f15aaf06b7\") " pod="openstack/horizon-5b5c8fb8f9-zv6j6" Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.377433 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5df3d575-c461-43d1-af22-d9f15aaf06b7-logs\") pod \"horizon-5b5c8fb8f9-zv6j6\" (UID: \"5df3d575-c461-43d1-af22-d9f15aaf06b7\") " pod="openstack/horizon-5b5c8fb8f9-zv6j6" Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.378371 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5df3d575-c461-43d1-af22-d9f15aaf06b7-config-data\") pod \"horizon-5b5c8fb8f9-zv6j6\" (UID: \"5df3d575-c461-43d1-af22-d9f15aaf06b7\") " pod="openstack/horizon-5b5c8fb8f9-zv6j6" Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.396294 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/5df3d575-c461-43d1-af22-d9f15aaf06b7-horizon-secret-key\") pod \"horizon-5b5c8fb8f9-zv6j6\" (UID: \"5df3d575-c461-43d1-af22-d9f15aaf06b7\") " pod="openstack/horizon-5b5c8fb8f9-zv6j6" Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.396907 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nx9lg\" (UniqueName: \"kubernetes.io/projected/5df3d575-c461-43d1-af22-d9f15aaf06b7-kube-api-access-nx9lg\") pod \"horizon-5b5c8fb8f9-zv6j6\" (UID: \"5df3d575-c461-43d1-af22-d9f15aaf06b7\") " pod="openstack/horizon-5b5c8fb8f9-zv6j6" Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.502569 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5b5c8fb8f9-zv6j6" Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.586437 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-8b7k2" event={"ID":"5d974fcb-fbad-4f24-9857-a791205029a0","Type":"ContainerStarted","Data":"92d0db8d8b072c3e4de51c5551db36e1144d3d4cb9f2282e4c669091d0ad501c"} Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.592217 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8ad986b1-c02f-4462-9330-0acba7d7001f","Type":"ContainerStarted","Data":"c887f83645c193562a9f7b2094ce0e118360c452ae255dc0e9c2883c1f9f3972"} Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.603042 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" event={"ID":"080ab56b-8da0-4b11-9595-6766031cfb41","Type":"ContainerStarted","Data":"05eda86e5101745a78fcdad654dc6c8f82aae8aea4ce49fde19c46f81ea69425"} Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.621403 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:22:40 crc kubenswrapper[4687]: W1125 09:22:40.633415 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc7eb5a4e_4a09_40c8_9fd1_270bb7bee515.slice/crio-a0ce5e056a0dac81e3c3c615d21a30573f238be117753cd35e3576aae4a7b697 WatchSource:0}: Error finding container a0ce5e056a0dac81e3c3c615d21a30573f238be117753cd35e3576aae4a7b697: Status 404 returned error can't find the container with id a0ce5e056a0dac81e3c3c615d21a30573f238be117753cd35e3576aae4a7b697 Nov 25 09:22:40 crc kubenswrapper[4687]: I1125 09:22:40.985961 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5b5c8fb8f9-zv6j6"] Nov 25 09:22:40 crc kubenswrapper[4687]: W1125 09:22:40.990829 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5df3d575_c461_43d1_af22_d9f15aaf06b7.slice/crio-19d04e3b92d2ce26ee3566195dbdcb17347c98efcc0bd5078a5256a9019a6bf8 WatchSource:0}: Error finding container 19d04e3b92d2ce26ee3566195dbdcb17347c98efcc0bd5078a5256a9019a6bf8: Status 404 returned error can't find the container with id 19d04e3b92d2ce26ee3566195dbdcb17347c98efcc0bd5078a5256a9019a6bf8 Nov 25 09:22:41 crc kubenswrapper[4687]: I1125 09:22:41.614664 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515","Type":"ContainerStarted","Data":"a0ce5e056a0dac81e3c3c615d21a30573f238be117753cd35e3576aae4a7b697"} Nov 25 09:22:41 crc kubenswrapper[4687]: I1125 09:22:41.617986 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5b5c8fb8f9-zv6j6" event={"ID":"5df3d575-c461-43d1-af22-d9f15aaf06b7","Type":"ContainerStarted","Data":"19d04e3b92d2ce26ee3566195dbdcb17347c98efcc0bd5078a5256a9019a6bf8"} Nov 25 09:22:45 crc kubenswrapper[4687]: I1125 09:22:45.686159 4687 generic.go:334] "Generic (PLEG): container finished" podID="df7139f0-f8b5-41bc-95f3-6839dcb3231b" containerID="32746a857358687ef9ba8fdd5bcacd51f46e559daadd3fb929fa9838f955a209" exitCode=0 Nov 25 09:22:45 crc kubenswrapper[4687]: I1125 09:22:45.686329 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" 
event={"ID":"df7139f0-f8b5-41bc-95f3-6839dcb3231b","Type":"ContainerDied","Data":"32746a857358687ef9ba8fdd5bcacd51f46e559daadd3fb929fa9838f955a209"} Nov 25 09:22:45 crc kubenswrapper[4687]: I1125 09:22:45.694969 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-sb55x" event={"ID":"95185182-622d-4bad-866e-80054cb0780a","Type":"ContainerStarted","Data":"dfc8332f355812154f4cd88cae810d7278e3be5e5005c77d3039c8bda72bab8c"} Nov 25 09:22:45 crc kubenswrapper[4687]: I1125 09:22:45.698131 4687 generic.go:334] "Generic (PLEG): container finished" podID="080ab56b-8da0-4b11-9595-6766031cfb41" containerID="927912f2959fecd35bd5afc3939186980ec12c65afb664317d2b756c7ec69edc" exitCode=0 Nov 25 09:22:45 crc kubenswrapper[4687]: I1125 09:22:45.698175 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" event={"ID":"080ab56b-8da0-4b11-9595-6766031cfb41","Type":"ContainerDied","Data":"927912f2959fecd35bd5afc3939186980ec12c65afb664317d2b756c7ec69edc"} Nov 25 09:22:45 crc kubenswrapper[4687]: I1125 09:22:45.728783 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-765t4" event={"ID":"7bb5f689-fd43-4fa3-b5a9-6603155ff184","Type":"ContainerStarted","Data":"8dc9ece059db60550aa80335a9b974c0c67756ac0fb2b73973e878346866870b"} Nov 25 09:22:45 crc kubenswrapper[4687]: I1125 09:22:45.770096 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515","Type":"ContainerStarted","Data":"1c6d84aeba1359b6c0715e3335d89d1c0f81f0f35606ab3c943d0f1978e88dff"} Nov 25 09:22:45 crc kubenswrapper[4687]: I1125 09:22:45.770140 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8ad986b1-c02f-4462-9330-0acba7d7001f","Type":"ContainerStarted","Data":"0f30f8ced2d13aba947e69e45e04406f085b8f932813adbe518f4c83a63e0c22"} Nov 25 09:22:45 crc kubenswrapper[4687]: I1125 09:22:45.779993 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-sb55x" podStartSLOduration=8.77996998 podStartE2EDuration="8.77996998s" podCreationTimestamp="2025-11-25 09:22:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:22:45.770369738 +0000 UTC m=+1160.824009456" watchObservedRunningTime="2025-11-25 09:22:45.77996998 +0000 UTC m=+1160.833609698" Nov 25 09:22:45 crc kubenswrapper[4687]: I1125 09:22:45.793195 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-765t4" podStartSLOduration=8.79317761 podStartE2EDuration="8.79317761s" podCreationTimestamp="2025-11-25 09:22:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:22:45.788180805 +0000 UTC m=+1160.841820523" watchObservedRunningTime="2025-11-25 09:22:45.79317761 +0000 UTC m=+1160.846817328" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.126628 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.181539 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-dns-swift-storage-0\") pod \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.181579 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-ovsdbserver-nb\") pod \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.181640 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-ovsdbserver-sb\") pod \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.182574 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-dns-svc\") pod \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.182645 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxgkm\" (UniqueName: \"kubernetes.io/projected/df7139f0-f8b5-41bc-95f3-6839dcb3231b-kube-api-access-bxgkm\") pod \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.182696 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-config\") pod \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\" (UID: \"df7139f0-f8b5-41bc-95f3-6839dcb3231b\") " Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.214793 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df7139f0-f8b5-41bc-95f3-6839dcb3231b-kube-api-access-bxgkm" (OuterVolumeSpecName: "kube-api-access-bxgkm") pod "df7139f0-f8b5-41bc-95f3-6839dcb3231b" (UID: "df7139f0-f8b5-41bc-95f3-6839dcb3231b"). InnerVolumeSpecName "kube-api-access-bxgkm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.215578 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "df7139f0-f8b5-41bc-95f3-6839dcb3231b" (UID: "df7139f0-f8b5-41bc-95f3-6839dcb3231b"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.215590 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "df7139f0-f8b5-41bc-95f3-6839dcb3231b" (UID: "df7139f0-f8b5-41bc-95f3-6839dcb3231b"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.221945 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "df7139f0-f8b5-41bc-95f3-6839dcb3231b" (UID: "df7139f0-f8b5-41bc-95f3-6839dcb3231b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.229201 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-config" (OuterVolumeSpecName: "config") pod "df7139f0-f8b5-41bc-95f3-6839dcb3231b" (UID: "df7139f0-f8b5-41bc-95f3-6839dcb3231b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.247954 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "df7139f0-f8b5-41bc-95f3-6839dcb3231b" (UID: "df7139f0-f8b5-41bc-95f3-6839dcb3231b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.284566 4687 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.284594 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxgkm\" (UniqueName: \"kubernetes.io/projected/df7139f0-f8b5-41bc-95f3-6839dcb3231b-kube-api-access-bxgkm\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.284608 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.284617 4687 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.284626 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.284634 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df7139f0-f8b5-41bc-95f3-6839dcb3231b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.767913 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="8ad986b1-c02f-4462-9330-0acba7d7001f" containerName="glance-log" containerID="cri-o://0f30f8ced2d13aba947e69e45e04406f085b8f932813adbe518f4c83a63e0c22" gracePeriod=30 Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.768026 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"8ad986b1-c02f-4462-9330-0acba7d7001f","Type":"ContainerStarted","Data":"44192e9f1c0dda9405f25a602766ea733e9d347fce38b2df55d11041e8f29174"} Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.768398 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="8ad986b1-c02f-4462-9330-0acba7d7001f" containerName="glance-httpd" containerID="cri-o://44192e9f1c0dda9405f25a602766ea733e9d347fce38b2df55d11041e8f29174" gracePeriod=30 Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.771218 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.771220 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bbf5cc879-4zwjp" event={"ID":"df7139f0-f8b5-41bc-95f3-6839dcb3231b","Type":"ContainerDied","Data":"042708d7216313ece59313129978cdc0e01f3ca43420bb39c7605650c5767f82"} Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.771974 4687 scope.go:117] "RemoveContainer" containerID="32746a857358687ef9ba8fdd5bcacd51f46e559daadd3fb929fa9838f955a209" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.789833 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" event={"ID":"080ab56b-8da0-4b11-9595-6766031cfb41","Type":"ContainerStarted","Data":"27d92533e56caa77f6e825bfea1df82e54c6c479b32b78267cddb6b34a17e48c"} Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.790704 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.810538 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=8.810488666 podStartE2EDuration="8.810488666s" podCreationTimestamp="2025-11-25 09:22:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:22:46.805566772 +0000 UTC m=+1161.859206490" watchObservedRunningTime="2025-11-25 09:22:46.810488666 +0000 UTC m=+1161.864128384" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.824276 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515","Type":"ContainerStarted","Data":"7267b473112636f00858bc363e90caf0cea0980e80ca5191e8d357fb85ccb40d"} Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.824848 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="c7eb5a4e-4a09-40c8-9fd1-270bb7bee515" containerName="glance-log" containerID="cri-o://1c6d84aeba1359b6c0715e3335d89d1c0f81f0f35606ab3c943d0f1978e88dff" gracePeriod=30 Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.825019 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="c7eb5a4e-4a09-40c8-9fd1-270bb7bee515" containerName="glance-httpd" containerID="cri-o://7267b473112636f00858bc363e90caf0cea0980e80ca5191e8d357fb85ccb40d" gracePeriod=30 Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.871034 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" podStartSLOduration=8.871015269 podStartE2EDuration="8.871015269s" 
podCreationTimestamp="2025-11-25 09:22:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:22:46.828789216 +0000 UTC m=+1161.882428944" watchObservedRunningTime="2025-11-25 09:22:46.871015269 +0000 UTC m=+1161.924654987" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.918284 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-86977dc76f-6cpw4"] Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.942287 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-4zwjp"] Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.955196 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=8.955177698 podStartE2EDuration="8.955177698s" podCreationTimestamp="2025-11-25 09:22:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:22:46.906277852 +0000 UTC m=+1161.959917570" watchObservedRunningTime="2025-11-25 09:22:46.955177698 +0000 UTC m=+1162.008817416" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.960879 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-4zwjp"] Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.991309 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7dc8446cb-d6wz7"] Nov 25 09:22:46 crc kubenswrapper[4687]: E1125 09:22:46.991725 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df7139f0-f8b5-41bc-95f3-6839dcb3231b" containerName="init" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.991743 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="df7139f0-f8b5-41bc-95f3-6839dcb3231b" containerName="init" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.991999 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="df7139f0-f8b5-41bc-95f3-6839dcb3231b" containerName="init" Nov 25 09:22:46 crc kubenswrapper[4687]: I1125 09:22:46.993295 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.004033 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7dc8446cb-d6wz7"] Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.006120 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.009140 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5b5c8fb8f9-zv6j6"] Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.024403 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-69b7bcc78d-r6t7q"] Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.025899 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.027041 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-69b7bcc78d-r6t7q"] Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.115583 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2f22d\" (UniqueName: \"kubernetes.io/projected/ea89f490-5a54-46eb-9b1c-1eb96dd181da-kube-api-access-2f22d\") pod \"horizon-7dc8446cb-d6wz7\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.115673 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4c7abdb-6d41-42c3-a228-27ebd825e7b5-combined-ca-bundle\") pod \"horizon-69b7bcc78d-r6t7q\" (UID: \"e4c7abdb-6d41-42c3-a228-27ebd825e7b5\") " pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.115717 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ea89f490-5a54-46eb-9b1c-1eb96dd181da-logs\") pod \"horizon-7dc8446cb-d6wz7\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.115737 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ea89f490-5a54-46eb-9b1c-1eb96dd181da-horizon-tls-certs\") pod \"horizon-7dc8446cb-d6wz7\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.116087 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ea89f490-5a54-46eb-9b1c-1eb96dd181da-config-data\") pod \"horizon-7dc8446cb-d6wz7\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.116208 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ea89f490-5a54-46eb-9b1c-1eb96dd181da-horizon-secret-key\") pod \"horizon-7dc8446cb-d6wz7\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.116244 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea89f490-5a54-46eb-9b1c-1eb96dd181da-combined-ca-bundle\") pod \"horizon-7dc8446cb-d6wz7\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.116277 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e4c7abdb-6d41-42c3-a228-27ebd825e7b5-config-data\") pod \"horizon-69b7bcc78d-r6t7q\" (UID: \"e4c7abdb-6d41-42c3-a228-27ebd825e7b5\") " pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.116297 4687 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xd6v4\" (UniqueName: \"kubernetes.io/projected/e4c7abdb-6d41-42c3-a228-27ebd825e7b5-kube-api-access-xd6v4\") pod \"horizon-69b7bcc78d-r6t7q\" (UID: \"e4c7abdb-6d41-42c3-a228-27ebd825e7b5\") " pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.116377 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e4c7abdb-6d41-42c3-a228-27ebd825e7b5-horizon-secret-key\") pod \"horizon-69b7bcc78d-r6t7q\" (UID: \"e4c7abdb-6d41-42c3-a228-27ebd825e7b5\") " pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.116479 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ea89f490-5a54-46eb-9b1c-1eb96dd181da-scripts\") pod \"horizon-7dc8446cb-d6wz7\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.116526 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e4c7abdb-6d41-42c3-a228-27ebd825e7b5-scripts\") pod \"horizon-69b7bcc78d-r6t7q\" (UID: \"e4c7abdb-6d41-42c3-a228-27ebd825e7b5\") " pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.116633 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4c7abdb-6d41-42c3-a228-27ebd825e7b5-logs\") pod \"horizon-69b7bcc78d-r6t7q\" (UID: \"e4c7abdb-6d41-42c3-a228-27ebd825e7b5\") " pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.116823 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4c7abdb-6d41-42c3-a228-27ebd825e7b5-horizon-tls-certs\") pod \"horizon-69b7bcc78d-r6t7q\" (UID: \"e4c7abdb-6d41-42c3-a228-27ebd825e7b5\") " pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.220084 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4c7abdb-6d41-42c3-a228-27ebd825e7b5-logs\") pod \"horizon-69b7bcc78d-r6t7q\" (UID: \"e4c7abdb-6d41-42c3-a228-27ebd825e7b5\") " pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.220172 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4c7abdb-6d41-42c3-a228-27ebd825e7b5-horizon-tls-certs\") pod \"horizon-69b7bcc78d-r6t7q\" (UID: \"e4c7abdb-6d41-42c3-a228-27ebd825e7b5\") " pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.220259 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2f22d\" (UniqueName: \"kubernetes.io/projected/ea89f490-5a54-46eb-9b1c-1eb96dd181da-kube-api-access-2f22d\") pod \"horizon-7dc8446cb-d6wz7\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.220288 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4c7abdb-6d41-42c3-a228-27ebd825e7b5-combined-ca-bundle\") pod \"horizon-69b7bcc78d-r6t7q\" (UID: \"e4c7abdb-6d41-42c3-a228-27ebd825e7b5\") " pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.220335 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ea89f490-5a54-46eb-9b1c-1eb96dd181da-logs\") pod \"horizon-7dc8446cb-d6wz7\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.220361 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ea89f490-5a54-46eb-9b1c-1eb96dd181da-horizon-tls-certs\") pod \"horizon-7dc8446cb-d6wz7\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.220435 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ea89f490-5a54-46eb-9b1c-1eb96dd181da-config-data\") pod \"horizon-7dc8446cb-d6wz7\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.220487 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ea89f490-5a54-46eb-9b1c-1eb96dd181da-horizon-secret-key\") pod \"horizon-7dc8446cb-d6wz7\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.220536 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea89f490-5a54-46eb-9b1c-1eb96dd181da-combined-ca-bundle\") pod \"horizon-7dc8446cb-d6wz7\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.220562 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e4c7abdb-6d41-42c3-a228-27ebd825e7b5-config-data\") pod \"horizon-69b7bcc78d-r6t7q\" (UID: \"e4c7abdb-6d41-42c3-a228-27ebd825e7b5\") " pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.220577 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xd6v4\" (UniqueName: \"kubernetes.io/projected/e4c7abdb-6d41-42c3-a228-27ebd825e7b5-kube-api-access-xd6v4\") pod \"horizon-69b7bcc78d-r6t7q\" (UID: \"e4c7abdb-6d41-42c3-a228-27ebd825e7b5\") " pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.220646 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e4c7abdb-6d41-42c3-a228-27ebd825e7b5-horizon-secret-key\") pod \"horizon-69b7bcc78d-r6t7q\" (UID: \"e4c7abdb-6d41-42c3-a228-27ebd825e7b5\") " pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.220723 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ea89f490-5a54-46eb-9b1c-1eb96dd181da-scripts\") pod 
\"horizon-7dc8446cb-d6wz7\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.220763 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e4c7abdb-6d41-42c3-a228-27ebd825e7b5-scripts\") pod \"horizon-69b7bcc78d-r6t7q\" (UID: \"e4c7abdb-6d41-42c3-a228-27ebd825e7b5\") " pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.221029 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4c7abdb-6d41-42c3-a228-27ebd825e7b5-logs\") pod \"horizon-69b7bcc78d-r6t7q\" (UID: \"e4c7abdb-6d41-42c3-a228-27ebd825e7b5\") " pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.221826 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e4c7abdb-6d41-42c3-a228-27ebd825e7b5-scripts\") pod \"horizon-69b7bcc78d-r6t7q\" (UID: \"e4c7abdb-6d41-42c3-a228-27ebd825e7b5\") " pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.223412 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ea89f490-5a54-46eb-9b1c-1eb96dd181da-logs\") pod \"horizon-7dc8446cb-d6wz7\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.223482 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ea89f490-5a54-46eb-9b1c-1eb96dd181da-config-data\") pod \"horizon-7dc8446cb-d6wz7\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.224010 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ea89f490-5a54-46eb-9b1c-1eb96dd181da-scripts\") pod \"horizon-7dc8446cb-d6wz7\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.224769 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e4c7abdb-6d41-42c3-a228-27ebd825e7b5-config-data\") pod \"horizon-69b7bcc78d-r6t7q\" (UID: \"e4c7abdb-6d41-42c3-a228-27ebd825e7b5\") " pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.237340 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4c7abdb-6d41-42c3-a228-27ebd825e7b5-combined-ca-bundle\") pod \"horizon-69b7bcc78d-r6t7q\" (UID: \"e4c7abdb-6d41-42c3-a228-27ebd825e7b5\") " pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.241074 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e4c7abdb-6d41-42c3-a228-27ebd825e7b5-horizon-secret-key\") pod \"horizon-69b7bcc78d-r6t7q\" (UID: \"e4c7abdb-6d41-42c3-a228-27ebd825e7b5\") " pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.253921 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xd6v4\" 
(UniqueName: \"kubernetes.io/projected/e4c7abdb-6d41-42c3-a228-27ebd825e7b5-kube-api-access-xd6v4\") pod \"horizon-69b7bcc78d-r6t7q\" (UID: \"e4c7abdb-6d41-42c3-a228-27ebd825e7b5\") " pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.254526 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ea89f490-5a54-46eb-9b1c-1eb96dd181da-horizon-tls-certs\") pod \"horizon-7dc8446cb-d6wz7\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.254527 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4c7abdb-6d41-42c3-a228-27ebd825e7b5-horizon-tls-certs\") pod \"horizon-69b7bcc78d-r6t7q\" (UID: \"e4c7abdb-6d41-42c3-a228-27ebd825e7b5\") " pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.254823 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ea89f490-5a54-46eb-9b1c-1eb96dd181da-horizon-secret-key\") pod \"horizon-7dc8446cb-d6wz7\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.257427 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2f22d\" (UniqueName: \"kubernetes.io/projected/ea89f490-5a54-46eb-9b1c-1eb96dd181da-kube-api-access-2f22d\") pod \"horizon-7dc8446cb-d6wz7\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.257597 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea89f490-5a54-46eb-9b1c-1eb96dd181da-combined-ca-bundle\") pod \"horizon-7dc8446cb-d6wz7\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.381659 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.387158 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.734045 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.820451 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df7139f0-f8b5-41bc-95f3-6839dcb3231b" path="/var/lib/kubelet/pods/df7139f0-f8b5-41bc-95f3-6839dcb3231b/volumes" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.829896 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-internal-tls-certs\") pod \"8ad986b1-c02f-4462-9330-0acba7d7001f\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.829963 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-combined-ca-bundle\") pod \"8ad986b1-c02f-4462-9330-0acba7d7001f\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.829993 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-config-data\") pod \"8ad986b1-c02f-4462-9330-0acba7d7001f\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.830159 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9hmnl\" (UniqueName: \"kubernetes.io/projected/8ad986b1-c02f-4462-9330-0acba7d7001f-kube-api-access-9hmnl\") pod \"8ad986b1-c02f-4462-9330-0acba7d7001f\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.830230 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8ad986b1-c02f-4462-9330-0acba7d7001f-httpd-run\") pod \"8ad986b1-c02f-4462-9330-0acba7d7001f\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.830254 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-scripts\") pod \"8ad986b1-c02f-4462-9330-0acba7d7001f\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.830278 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ad986b1-c02f-4462-9330-0acba7d7001f-logs\") pod \"8ad986b1-c02f-4462-9330-0acba7d7001f\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.830334 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"8ad986b1-c02f-4462-9330-0acba7d7001f\" (UID: \"8ad986b1-c02f-4462-9330-0acba7d7001f\") " Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.836108 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ad986b1-c02f-4462-9330-0acba7d7001f-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "8ad986b1-c02f-4462-9330-0acba7d7001f" (UID: "8ad986b1-c02f-4462-9330-0acba7d7001f"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.836205 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ad986b1-c02f-4462-9330-0acba7d7001f-logs" (OuterVolumeSpecName: "logs") pod "8ad986b1-c02f-4462-9330-0acba7d7001f" (UID: "8ad986b1-c02f-4462-9330-0acba7d7001f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.848144 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-scripts" (OuterVolumeSpecName: "scripts") pod "8ad986b1-c02f-4462-9330-0acba7d7001f" (UID: "8ad986b1-c02f-4462-9330-0acba7d7001f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.857894 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance") pod "8ad986b1-c02f-4462-9330-0acba7d7001f" (UID: "8ad986b1-c02f-4462-9330-0acba7d7001f"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.864653 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ad986b1-c02f-4462-9330-0acba7d7001f-kube-api-access-9hmnl" (OuterVolumeSpecName: "kube-api-access-9hmnl") pod "8ad986b1-c02f-4462-9330-0acba7d7001f" (UID: "8ad986b1-c02f-4462-9330-0acba7d7001f"). InnerVolumeSpecName "kube-api-access-9hmnl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.868016 4687 generic.go:334] "Generic (PLEG): container finished" podID="8ad986b1-c02f-4462-9330-0acba7d7001f" containerID="44192e9f1c0dda9405f25a602766ea733e9d347fce38b2df55d11041e8f29174" exitCode=0 Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.868047 4687 generic.go:334] "Generic (PLEG): container finished" podID="8ad986b1-c02f-4462-9330-0acba7d7001f" containerID="0f30f8ced2d13aba947e69e45e04406f085b8f932813adbe518f4c83a63e0c22" exitCode=143 Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.868098 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8ad986b1-c02f-4462-9330-0acba7d7001f","Type":"ContainerDied","Data":"44192e9f1c0dda9405f25a602766ea733e9d347fce38b2df55d11041e8f29174"} Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.868129 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8ad986b1-c02f-4462-9330-0acba7d7001f","Type":"ContainerDied","Data":"0f30f8ced2d13aba947e69e45e04406f085b8f932813adbe518f4c83a63e0c22"} Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.868143 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8ad986b1-c02f-4462-9330-0acba7d7001f","Type":"ContainerDied","Data":"c887f83645c193562a9f7b2094ce0e118360c452ae255dc0e9c2883c1f9f3972"} Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.868162 4687 scope.go:117] "RemoveContainer" containerID="44192e9f1c0dda9405f25a602766ea733e9d347fce38b2df55d11041e8f29174" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.868312 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.868710 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.880989 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8ad986b1-c02f-4462-9330-0acba7d7001f" (UID: "8ad986b1-c02f-4462-9330-0acba7d7001f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.887354 4687 generic.go:334] "Generic (PLEG): container finished" podID="c7eb5a4e-4a09-40c8-9fd1-270bb7bee515" containerID="7267b473112636f00858bc363e90caf0cea0980e80ca5191e8d357fb85ccb40d" exitCode=0 Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.887391 4687 generic.go:334] "Generic (PLEG): container finished" podID="c7eb5a4e-4a09-40c8-9fd1-270bb7bee515" containerID="1c6d84aeba1359b6c0715e3335d89d1c0f81f0f35606ab3c943d0f1978e88dff" exitCode=143 Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.888267 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.888448 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515","Type":"ContainerDied","Data":"7267b473112636f00858bc363e90caf0cea0980e80ca5191e8d357fb85ccb40d"} Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.888493 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515","Type":"ContainerDied","Data":"1c6d84aeba1359b6c0715e3335d89d1c0f81f0f35606ab3c943d0f1978e88dff"} Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.913226 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "8ad986b1-c02f-4462-9330-0acba7d7001f" (UID: "8ad986b1-c02f-4462-9330-0acba7d7001f"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.933335 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-combined-ca-bundle\") pod \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.933405 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-scripts\") pod \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.933428 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxnx5\" (UniqueName: \"kubernetes.io/projected/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-kube-api-access-wxnx5\") pod \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.933598 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-logs\") pod \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.933647 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-httpd-run\") pod \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.933675 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-public-tls-certs\") pod \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.933704 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.933729 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-config-data\") pod \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\" (UID: \"c7eb5a4e-4a09-40c8-9fd1-270bb7bee515\") " Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.937763 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9hmnl\" (UniqueName: \"kubernetes.io/projected/8ad986b1-c02f-4462-9330-0acba7d7001f-kube-api-access-9hmnl\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.938091 4687 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8ad986b1-c02f-4462-9330-0acba7d7001f-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.938191 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.938276 4687 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ad986b1-c02f-4462-9330-0acba7d7001f-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.938382 4687 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.938477 4687 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.938601 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.943032 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-logs" (OuterVolumeSpecName: "logs") pod "c7eb5a4e-4a09-40c8-9fd1-270bb7bee515" (UID: "c7eb5a4e-4a09-40c8-9fd1-270bb7bee515"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.943992 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-config-data" (OuterVolumeSpecName: "config-data") pod "8ad986b1-c02f-4462-9330-0acba7d7001f" (UID: "8ad986b1-c02f-4462-9330-0acba7d7001f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.945964 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "c7eb5a4e-4a09-40c8-9fd1-270bb7bee515" (UID: "c7eb5a4e-4a09-40c8-9fd1-270bb7bee515"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.951062 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-scripts" (OuterVolumeSpecName: "scripts") pod "c7eb5a4e-4a09-40c8-9fd1-270bb7bee515" (UID: "c7eb5a4e-4a09-40c8-9fd1-270bb7bee515"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.952915 4687 scope.go:117] "RemoveContainer" containerID="0f30f8ced2d13aba947e69e45e04406f085b8f932813adbe518f4c83a63e0c22" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.955065 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-kube-api-access-wxnx5" (OuterVolumeSpecName: "kube-api-access-wxnx5") pod "c7eb5a4e-4a09-40c8-9fd1-270bb7bee515" (UID: "c7eb5a4e-4a09-40c8-9fd1-270bb7bee515"). InnerVolumeSpecName "kube-api-access-wxnx5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.959014 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "c7eb5a4e-4a09-40c8-9fd1-270bb7bee515" (UID: "c7eb5a4e-4a09-40c8-9fd1-270bb7bee515"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.972373 4687 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Nov 25 09:22:47 crc kubenswrapper[4687]: I1125 09:22:47.980537 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c7eb5a4e-4a09-40c8-9fd1-270bb7bee515" (UID: "c7eb5a4e-4a09-40c8-9fd1-270bb7bee515"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.010860 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-config-data" (OuterVolumeSpecName: "config-data") pod "c7eb5a4e-4a09-40c8-9fd1-270bb7bee515" (UID: "c7eb5a4e-4a09-40c8-9fd1-270bb7bee515"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.014965 4687 scope.go:117] "RemoveContainer" containerID="44192e9f1c0dda9405f25a602766ea733e9d347fce38b2df55d11041e8f29174" Nov 25 09:22:48 crc kubenswrapper[4687]: E1125 09:22:48.015650 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44192e9f1c0dda9405f25a602766ea733e9d347fce38b2df55d11041e8f29174\": container with ID starting with 44192e9f1c0dda9405f25a602766ea733e9d347fce38b2df55d11041e8f29174 not found: ID does not exist" containerID="44192e9f1c0dda9405f25a602766ea733e9d347fce38b2df55d11041e8f29174" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.015689 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44192e9f1c0dda9405f25a602766ea733e9d347fce38b2df55d11041e8f29174"} err="failed to get container status \"44192e9f1c0dda9405f25a602766ea733e9d347fce38b2df55d11041e8f29174\": rpc error: code = NotFound desc = could not find container \"44192e9f1c0dda9405f25a602766ea733e9d347fce38b2df55d11041e8f29174\": container with ID starting with 44192e9f1c0dda9405f25a602766ea733e9d347fce38b2df55d11041e8f29174 not found: ID does not exist" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.015714 4687 scope.go:117] "RemoveContainer" containerID="0f30f8ced2d13aba947e69e45e04406f085b8f932813adbe518f4c83a63e0c22" Nov 25 09:22:48 crc kubenswrapper[4687]: E1125 09:22:48.017228 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f30f8ced2d13aba947e69e45e04406f085b8f932813adbe518f4c83a63e0c22\": container with ID starting with 0f30f8ced2d13aba947e69e45e04406f085b8f932813adbe518f4c83a63e0c22 not found: ID does not exist" containerID="0f30f8ced2d13aba947e69e45e04406f085b8f932813adbe518f4c83a63e0c22" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.017251 4687 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f30f8ced2d13aba947e69e45e04406f085b8f932813adbe518f4c83a63e0c22"} err="failed to get container status \"0f30f8ced2d13aba947e69e45e04406f085b8f932813adbe518f4c83a63e0c22\": rpc error: code = NotFound desc = could not find container \"0f30f8ced2d13aba947e69e45e04406f085b8f932813adbe518f4c83a63e0c22\": container with ID starting with 0f30f8ced2d13aba947e69e45e04406f085b8f932813adbe518f4c83a63e0c22 not found: ID does not exist" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.017268 4687 scope.go:117] "RemoveContainer" containerID="44192e9f1c0dda9405f25a602766ea733e9d347fce38b2df55d11041e8f29174" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.017480 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44192e9f1c0dda9405f25a602766ea733e9d347fce38b2df55d11041e8f29174"} err="failed to get container status \"44192e9f1c0dda9405f25a602766ea733e9d347fce38b2df55d11041e8f29174\": rpc error: code = NotFound desc = could not find container \"44192e9f1c0dda9405f25a602766ea733e9d347fce38b2df55d11041e8f29174\": container with ID starting with 44192e9f1c0dda9405f25a602766ea733e9d347fce38b2df55d11041e8f29174 not found: ID does not exist" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.017494 4687 scope.go:117] "RemoveContainer" containerID="0f30f8ced2d13aba947e69e45e04406f085b8f932813adbe518f4c83a63e0c22" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.017877 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f30f8ced2d13aba947e69e45e04406f085b8f932813adbe518f4c83a63e0c22"} err="failed to get container status \"0f30f8ced2d13aba947e69e45e04406f085b8f932813adbe518f4c83a63e0c22\": rpc error: code = NotFound desc = could not find container \"0f30f8ced2d13aba947e69e45e04406f085b8f932813adbe518f4c83a63e0c22\": container with ID starting with 0f30f8ced2d13aba947e69e45e04406f085b8f932813adbe518f4c83a63e0c22 not found: ID does not exist" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.017906 4687 scope.go:117] "RemoveContainer" containerID="7267b473112636f00858bc363e90caf0cea0980e80ca5191e8d357fb85ccb40d" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.036430 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c7eb5a4e-4a09-40c8-9fd1-270bb7bee515" (UID: "c7eb5a4e-4a09-40c8-9fd1-270bb7bee515"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.043121 4687 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.043163 4687 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.043174 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.043183 4687 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.043192 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.043200 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.043209 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxnx5\" (UniqueName: \"kubernetes.io/projected/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-kube-api-access-wxnx5\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.043218 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ad986b1-c02f-4462-9330-0acba7d7001f-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.043226 4687 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.043233 4687 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.060442 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-69b7bcc78d-r6t7q"] Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.070245 4687 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 25 09:22:48 crc kubenswrapper[4687]: W1125 09:22:48.074189 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode4c7abdb_6d41_42c3_a228_27ebd825e7b5.slice/crio-802ca3afa0cd026a53357e1fae8d8878afc0599cf6c706017dce9f4b0058cc09 WatchSource:0}: Error finding container 802ca3afa0cd026a53357e1fae8d8878afc0599cf6c706017dce9f4b0058cc09: Status 404 returned error can't find the container with id 
802ca3afa0cd026a53357e1fae8d8878afc0599cf6c706017dce9f4b0058cc09 Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.146530 4687 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.155044 4687 scope.go:117] "RemoveContainer" containerID="1c6d84aeba1359b6c0715e3335d89d1c0f81f0f35606ab3c943d0f1978e88dff" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.176645 4687 scope.go:117] "RemoveContainer" containerID="7267b473112636f00858bc363e90caf0cea0980e80ca5191e8d357fb85ccb40d" Nov 25 09:22:48 crc kubenswrapper[4687]: E1125 09:22:48.177061 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7267b473112636f00858bc363e90caf0cea0980e80ca5191e8d357fb85ccb40d\": container with ID starting with 7267b473112636f00858bc363e90caf0cea0980e80ca5191e8d357fb85ccb40d not found: ID does not exist" containerID="7267b473112636f00858bc363e90caf0cea0980e80ca5191e8d357fb85ccb40d" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.177103 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7267b473112636f00858bc363e90caf0cea0980e80ca5191e8d357fb85ccb40d"} err="failed to get container status \"7267b473112636f00858bc363e90caf0cea0980e80ca5191e8d357fb85ccb40d\": rpc error: code = NotFound desc = could not find container \"7267b473112636f00858bc363e90caf0cea0980e80ca5191e8d357fb85ccb40d\": container with ID starting with 7267b473112636f00858bc363e90caf0cea0980e80ca5191e8d357fb85ccb40d not found: ID does not exist" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.177131 4687 scope.go:117] "RemoveContainer" containerID="1c6d84aeba1359b6c0715e3335d89d1c0f81f0f35606ab3c943d0f1978e88dff" Nov 25 09:22:48 crc kubenswrapper[4687]: E1125 09:22:48.177691 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c6d84aeba1359b6c0715e3335d89d1c0f81f0f35606ab3c943d0f1978e88dff\": container with ID starting with 1c6d84aeba1359b6c0715e3335d89d1c0f81f0f35606ab3c943d0f1978e88dff not found: ID does not exist" containerID="1c6d84aeba1359b6c0715e3335d89d1c0f81f0f35606ab3c943d0f1978e88dff" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.177721 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c6d84aeba1359b6c0715e3335d89d1c0f81f0f35606ab3c943d0f1978e88dff"} err="failed to get container status \"1c6d84aeba1359b6c0715e3335d89d1c0f81f0f35606ab3c943d0f1978e88dff\": rpc error: code = NotFound desc = could not find container \"1c6d84aeba1359b6c0715e3335d89d1c0f81f0f35606ab3c943d0f1978e88dff\": container with ID starting with 1c6d84aeba1359b6c0715e3335d89d1c0f81f0f35606ab3c943d0f1978e88dff not found: ID does not exist" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.214150 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7dc8446cb-d6wz7"] Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.250519 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:22:48 crc kubenswrapper[4687]: W1125 09:22:48.255890 4687 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podea89f490_5a54_46eb_9b1c_1eb96dd181da.slice/crio-edf5b834f44317b053e8fb14665d28bd1dcb752f41fadafeaf32f4dabc68b005 WatchSource:0}: Error finding container edf5b834f44317b053e8fb14665d28bd1dcb752f41fadafeaf32f4dabc68b005: Status 404 returned error can't find the container with id edf5b834f44317b053e8fb14665d28bd1dcb752f41fadafeaf32f4dabc68b005 Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.282144 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.292145 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:22:48 crc kubenswrapper[4687]: E1125 09:22:48.292564 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ad986b1-c02f-4462-9330-0acba7d7001f" containerName="glance-httpd" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.292578 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ad986b1-c02f-4462-9330-0acba7d7001f" containerName="glance-httpd" Nov 25 09:22:48 crc kubenswrapper[4687]: E1125 09:22:48.292600 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ad986b1-c02f-4462-9330-0acba7d7001f" containerName="glance-log" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.292606 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ad986b1-c02f-4462-9330-0acba7d7001f" containerName="glance-log" Nov 25 09:22:48 crc kubenswrapper[4687]: E1125 09:22:48.292614 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7eb5a4e-4a09-40c8-9fd1-270bb7bee515" containerName="glance-httpd" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.292620 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7eb5a4e-4a09-40c8-9fd1-270bb7bee515" containerName="glance-httpd" Nov 25 09:22:48 crc kubenswrapper[4687]: E1125 09:22:48.292629 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7eb5a4e-4a09-40c8-9fd1-270bb7bee515" containerName="glance-log" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.292634 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7eb5a4e-4a09-40c8-9fd1-270bb7bee515" containerName="glance-log" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.292800 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7eb5a4e-4a09-40c8-9fd1-270bb7bee515" containerName="glance-log" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.292815 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ad986b1-c02f-4462-9330-0acba7d7001f" containerName="glance-log" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.292827 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ad986b1-c02f-4462-9330-0acba7d7001f" containerName="glance-httpd" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.292846 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7eb5a4e-4a09-40c8-9fd1-270bb7bee515" containerName="glance-httpd" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.293832 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.295944 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-rggzb" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.297800 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.297946 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.297853 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.310783 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.326032 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.340582 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.350045 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.352248 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.352463 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.353091 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.353134 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.353320 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-logs\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.353349 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.353369 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.353394 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xd2bx\" (UniqueName: \"kubernetes.io/projected/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-kube-api-access-xd2bx\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.353412 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.355107 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.355339 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.365207 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.455433 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/639af5c1-faaf-45e2-b75e-6031913ffdb9-logs\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.455490 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/639af5c1-faaf-45e2-b75e-6031913ffdb9-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.455586 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-logs\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.455629 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-scripts\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.455647 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.455693 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.455712 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.455729 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.455776 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6n5bv\" (UniqueName: \"kubernetes.io/projected/639af5c1-faaf-45e2-b75e-6031913ffdb9-kube-api-access-6n5bv\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.455798 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-xd2bx\" (UniqueName: \"kubernetes.io/projected/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-kube-api-access-xd2bx\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.455815 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.455864 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.455985 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.456031 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-config-data\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.456056 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-logs\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.456087 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.456106 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.456558 4687 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.457032 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.462770 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.471117 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.471824 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.472681 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.474315 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xd2bx\" (UniqueName: \"kubernetes.io/projected/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-kube-api-access-xd2bx\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.494363 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.558710 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/639af5c1-faaf-45e2-b75e-6031913ffdb9-logs\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.558786 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/639af5c1-faaf-45e2-b75e-6031913ffdb9-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.558866 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-scripts\") pod \"glance-default-external-api-0\" (UID: 
\"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.558896 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.558974 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.559037 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6n5bv\" (UniqueName: \"kubernetes.io/projected/639af5c1-faaf-45e2-b75e-6031913ffdb9-kube-api-access-6n5bv\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.559093 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.559153 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-config-data\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.564117 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-config-data\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.564474 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/639af5c1-faaf-45e2-b75e-6031913ffdb9-logs\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.564859 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/639af5c1-faaf-45e2-b75e-6031913ffdb9-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.568318 4687 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-external-api-0" Nov 25 
09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.568923 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-scripts\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.573264 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.581813 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.594838 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6n5bv\" (UniqueName: \"kubernetes.io/projected/639af5c1-faaf-45e2-b75e-6031913ffdb9-kube-api-access-6n5bv\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.603981 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.625029 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.675314 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.908540 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-69b7bcc78d-r6t7q" event={"ID":"e4c7abdb-6d41-42c3-a228-27ebd825e7b5","Type":"ContainerStarted","Data":"802ca3afa0cd026a53357e1fae8d8878afc0599cf6c706017dce9f4b0058cc09"} Nov 25 09:22:48 crc kubenswrapper[4687]: I1125 09:22:48.910740 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7dc8446cb-d6wz7" event={"ID":"ea89f490-5a54-46eb-9b1c-1eb96dd181da","Type":"ContainerStarted","Data":"edf5b834f44317b053e8fb14665d28bd1dcb752f41fadafeaf32f4dabc68b005"} Nov 25 09:22:49 crc kubenswrapper[4687]: I1125 09:22:49.215954 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:22:49 crc kubenswrapper[4687]: I1125 09:22:49.332113 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:22:49 crc kubenswrapper[4687]: I1125 09:22:49.755241 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ad986b1-c02f-4462-9330-0acba7d7001f" path="/var/lib/kubelet/pods/8ad986b1-c02f-4462-9330-0acba7d7001f/volumes" Nov 25 09:22:49 crc kubenswrapper[4687]: I1125 09:22:49.756343 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7eb5a4e-4a09-40c8-9fd1-270bb7bee515" path="/var/lib/kubelet/pods/c7eb5a4e-4a09-40c8-9fd1-270bb7bee515/volumes" Nov 25 09:22:49 crc kubenswrapper[4687]: I1125 09:22:49.938568 4687 generic.go:334] "Generic (PLEG): container finished" podID="95185182-622d-4bad-866e-80054cb0780a" containerID="dfc8332f355812154f4cd88cae810d7278e3be5e5005c77d3039c8bda72bab8c" exitCode=0 Nov 25 09:22:49 crc kubenswrapper[4687]: I1125 09:22:49.938625 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-sb55x" event={"ID":"95185182-622d-4bad-866e-80054cb0780a","Type":"ContainerDied","Data":"dfc8332f355812154f4cd88cae810d7278e3be5e5005c77d3039c8bda72bab8c"} Nov 25 09:22:51 crc kubenswrapper[4687]: I1125 09:22:51.957139 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b","Type":"ContainerStarted","Data":"dcc7272a63db41f14f73046d8acbf7505f24845e363bf61f9cb7a74003d2dea4"} Nov 25 09:22:53 crc kubenswrapper[4687]: I1125 09:22:53.896909 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:22:53 crc kubenswrapper[4687]: I1125 09:22:53.970644 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-qrhc5"] Nov 25 09:22:53 crc kubenswrapper[4687]: I1125 09:22:53.970861 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" podUID="5796267d-68c1-4f9f-bffe-0edaba3fa4d1" containerName="dnsmasq-dns" containerID="cri-o://cb396f0172c75527c46056c0b6b6df0e78d69812ee665e53aef312be06664a72" gracePeriod=10 Nov 25 09:22:55 crc kubenswrapper[4687]: I1125 09:22:55.001845 4687 generic.go:334] "Generic (PLEG): container finished" podID="5796267d-68c1-4f9f-bffe-0edaba3fa4d1" containerID="cb396f0172c75527c46056c0b6b6df0e78d69812ee665e53aef312be06664a72" exitCode=0 Nov 25 09:22:55 crc kubenswrapper[4687]: I1125 09:22:55.001929 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" 
event={"ID":"5796267d-68c1-4f9f-bffe-0edaba3fa4d1","Type":"ContainerDied","Data":"cb396f0172c75527c46056c0b6b6df0e78d69812ee665e53aef312be06664a72"} Nov 25 09:22:56 crc kubenswrapper[4687]: W1125 09:22:56.824737 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod639af5c1_faaf_45e2_b75e_6031913ffdb9.slice/crio-8990c357f1ae29b2219db77c6c98c0ac63c0eb7b1bcb11e8c46ccc9edc4d4a74 WatchSource:0}: Error finding container 8990c357f1ae29b2219db77c6c98c0ac63c0eb7b1bcb11e8c46ccc9edc4d4a74: Status 404 returned error can't find the container with id 8990c357f1ae29b2219db77c6c98c0ac63c0eb7b1bcb11e8c46ccc9edc4d4a74 Nov 25 09:22:56 crc kubenswrapper[4687]: I1125 09:22:56.995294 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" podUID="5796267d-68c1-4f9f-bffe-0edaba3fa4d1" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.125:5353: connect: connection refused" Nov 25 09:22:57 crc kubenswrapper[4687]: I1125 09:22:57.022112 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"639af5c1-faaf-45e2-b75e-6031913ffdb9","Type":"ContainerStarted","Data":"8990c357f1ae29b2219db77c6c98c0ac63c0eb7b1bcb11e8c46ccc9edc4d4a74"} Nov 25 09:23:01 crc kubenswrapper[4687]: I1125 09:23:01.995236 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" podUID="5796267d-68c1-4f9f-bffe-0edaba3fa4d1" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.125:5353: connect: connection refused" Nov 25 09:23:02 crc kubenswrapper[4687]: E1125 09:23:02.291587 4687 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified" Nov 25 09:23:02 crc kubenswrapper[4687]: E1125 09:23:02.291787 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5d7hd5hb4h5cdh54fh675hb8h655h588h95h685h599hf7h75h576h674h5c7h55bh57hc4hd6hc9h67bhf7hc5h6h688h68h67h9bh587h59q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5fgh5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(09662da0-b802-43c3-9c8e-4c9e951bdd7f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:23:06 crc kubenswrapper[4687]: I1125 09:23:06.994708 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" podUID="5796267d-68c1-4f9f-bffe-0edaba3fa4d1" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.125:5353: connect: connection refused" Nov 25 09:23:06 crc kubenswrapper[4687]: I1125 09:23:06.995329 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:23:09 crc kubenswrapper[4687]: I1125 09:23:09.802606 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-sb55x" Nov 25 09:23:09 crc kubenswrapper[4687]: I1125 09:23:09.987722 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-config-data\") pod \"95185182-622d-4bad-866e-80054cb0780a\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " Nov 25 09:23:09 crc kubenswrapper[4687]: I1125 09:23:09.987830 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-credential-keys\") pod \"95185182-622d-4bad-866e-80054cb0780a\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " Nov 25 09:23:09 crc kubenswrapper[4687]: I1125 09:23:09.987872 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-fernet-keys\") pod \"95185182-622d-4bad-866e-80054cb0780a\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " Nov 25 09:23:09 crc kubenswrapper[4687]: I1125 09:23:09.987988 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-scripts\") pod \"95185182-622d-4bad-866e-80054cb0780a\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " Nov 25 09:23:09 crc kubenswrapper[4687]: I1125 09:23:09.988035 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rsg6s\" (UniqueName: \"kubernetes.io/projected/95185182-622d-4bad-866e-80054cb0780a-kube-api-access-rsg6s\") pod \"95185182-622d-4bad-866e-80054cb0780a\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " Nov 25 09:23:09 crc kubenswrapper[4687]: I1125 09:23:09.988158 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-combined-ca-bundle\") pod \"95185182-622d-4bad-866e-80054cb0780a\" (UID: \"95185182-622d-4bad-866e-80054cb0780a\") " Nov 25 09:23:09 crc kubenswrapper[4687]: I1125 09:23:09.997655 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "95185182-622d-4bad-866e-80054cb0780a" (UID: "95185182-622d-4bad-866e-80054cb0780a"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:09 crc kubenswrapper[4687]: I1125 09:23:09.998337 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95185182-622d-4bad-866e-80054cb0780a-kube-api-access-rsg6s" (OuterVolumeSpecName: "kube-api-access-rsg6s") pod "95185182-622d-4bad-866e-80054cb0780a" (UID: "95185182-622d-4bad-866e-80054cb0780a"). InnerVolumeSpecName "kube-api-access-rsg6s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:23:10 crc kubenswrapper[4687]: I1125 09:23:10.010013 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-scripts" (OuterVolumeSpecName: "scripts") pod "95185182-622d-4bad-866e-80054cb0780a" (UID: "95185182-622d-4bad-866e-80054cb0780a"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:10 crc kubenswrapper[4687]: I1125 09:23:10.010195 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "95185182-622d-4bad-866e-80054cb0780a" (UID: "95185182-622d-4bad-866e-80054cb0780a"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:10 crc kubenswrapper[4687]: I1125 09:23:10.018482 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "95185182-622d-4bad-866e-80054cb0780a" (UID: "95185182-622d-4bad-866e-80054cb0780a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:10 crc kubenswrapper[4687]: I1125 09:23:10.020741 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-config-data" (OuterVolumeSpecName: "config-data") pod "95185182-622d-4bad-866e-80054cb0780a" (UID: "95185182-622d-4bad-866e-80054cb0780a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:10 crc kubenswrapper[4687]: I1125 09:23:10.090056 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:10 crc kubenswrapper[4687]: I1125 09:23:10.090084 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:10 crc kubenswrapper[4687]: I1125 09:23:10.090093 4687 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:10 crc kubenswrapper[4687]: I1125 09:23:10.090101 4687 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:10 crc kubenswrapper[4687]: I1125 09:23:10.090111 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/95185182-622d-4bad-866e-80054cb0780a-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:10 crc kubenswrapper[4687]: I1125 09:23:10.090120 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rsg6s\" (UniqueName: \"kubernetes.io/projected/95185182-622d-4bad-866e-80054cb0780a-kube-api-access-rsg6s\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:10 crc kubenswrapper[4687]: I1125 09:23:10.149912 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-sb55x" event={"ID":"95185182-622d-4bad-866e-80054cb0780a","Type":"ContainerDied","Data":"9392771a48fa67855903dd907e455ff3344e481a56e580525460b47385dc998e"} Nov 25 09:23:10 crc kubenswrapper[4687]: I1125 09:23:10.150283 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9392771a48fa67855903dd907e455ff3344e481a56e580525460b47385dc998e" Nov 25 09:23:10 crc kubenswrapper[4687]: I1125 09:23:10.149962 4687 util.go:48] 
"No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-sb55x" Nov 25 09:23:10 crc kubenswrapper[4687]: I1125 09:23:10.997817 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-sb55x"] Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.005460 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-sb55x"] Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.092807 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-lvpd8"] Nov 25 09:23:11 crc kubenswrapper[4687]: E1125 09:23:11.093252 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95185182-622d-4bad-866e-80054cb0780a" containerName="keystone-bootstrap" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.093269 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="95185182-622d-4bad-866e-80054cb0780a" containerName="keystone-bootstrap" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.093992 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="95185182-622d-4bad-866e-80054cb0780a" containerName="keystone-bootstrap" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.094716 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.096290 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.097453 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.097546 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.097760 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-l777p" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.098207 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.103234 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-lvpd8"] Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.108190 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-config-data\") pod \"keystone-bootstrap-lvpd8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.108264 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-credential-keys\") pod \"keystone-bootstrap-lvpd8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.108324 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-scripts\") pod \"keystone-bootstrap-lvpd8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:11 crc 
kubenswrapper[4687]: I1125 09:23:11.108387 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-fernet-keys\") pod \"keystone-bootstrap-lvpd8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.108410 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-combined-ca-bundle\") pod \"keystone-bootstrap-lvpd8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.108429 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-clbh2\" (UniqueName: \"kubernetes.io/projected/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-kube-api-access-clbh2\") pod \"keystone-bootstrap-lvpd8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.209660 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-config-data\") pod \"keystone-bootstrap-lvpd8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.209706 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-credential-keys\") pod \"keystone-bootstrap-lvpd8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.209771 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-scripts\") pod \"keystone-bootstrap-lvpd8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.209856 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-fernet-keys\") pod \"keystone-bootstrap-lvpd8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.209887 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-combined-ca-bundle\") pod \"keystone-bootstrap-lvpd8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.209915 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-clbh2\" (UniqueName: \"kubernetes.io/projected/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-kube-api-access-clbh2\") pod \"keystone-bootstrap-lvpd8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.215132 4687 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-scripts\") pod \"keystone-bootstrap-lvpd8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.216443 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-credential-keys\") pod \"keystone-bootstrap-lvpd8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.217940 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-config-data\") pod \"keystone-bootstrap-lvpd8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.219143 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-fernet-keys\") pod \"keystone-bootstrap-lvpd8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.223022 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-combined-ca-bundle\") pod \"keystone-bootstrap-lvpd8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.227392 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-clbh2\" (UniqueName: \"kubernetes.io/projected/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-kube-api-access-clbh2\") pod \"keystone-bootstrap-lvpd8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.419375 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:11 crc kubenswrapper[4687]: I1125 09:23:11.750240 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="95185182-622d-4bad-866e-80054cb0780a" path="/var/lib/kubelet/pods/95185182-622d-4bad-866e-80054cb0780a/volumes" Nov 25 09:23:12 crc kubenswrapper[4687]: E1125 09:23:12.924838 4687 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 25 09:23:12 crc kubenswrapper[4687]: E1125 09:23:12.925181 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pzvlh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-2h86j_openstack(53681654-97b6-4586-ba53-8b6b018e04fa): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:23:12 crc kubenswrapper[4687]: E1125 09:23:12.926367 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack/cinder-db-sync-2h86j" podUID="53681654-97b6-4586-ba53-8b6b018e04fa" Nov 25 09:23:12 crc kubenswrapper[4687]: I1125 09:23:12.978004 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.142268 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-ovsdbserver-nb\") pod \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.142392 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-config\") pod \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.142434 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-dns-swift-storage-0\") pod \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.142460 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-ovsdbserver-sb\") pod \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.142593 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-dns-svc\") pod \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.142621 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w5xzn\" (UniqueName: \"kubernetes.io/projected/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-kube-api-access-w5xzn\") pod \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\" (UID: \"5796267d-68c1-4f9f-bffe-0edaba3fa4d1\") " Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.151170 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-kube-api-access-w5xzn" (OuterVolumeSpecName: "kube-api-access-w5xzn") pod "5796267d-68c1-4f9f-bffe-0edaba3fa4d1" (UID: "5796267d-68c1-4f9f-bffe-0edaba3fa4d1"). InnerVolumeSpecName "kube-api-access-w5xzn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.178662 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.178831 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" event={"ID":"5796267d-68c1-4f9f-bffe-0edaba3fa4d1","Type":"ContainerDied","Data":"29d13f0485a553facd36014a37a22dcc44259819a18baa94bc3f9bffaa6e981a"} Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.178866 4687 scope.go:117] "RemoveContainer" containerID="cb396f0172c75527c46056c0b6b6df0e78d69812ee665e53aef312be06664a72" Nov 25 09:23:13 crc kubenswrapper[4687]: E1125 09:23:13.182829 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-2h86j" podUID="53681654-97b6-4586-ba53-8b6b018e04fa" Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.194794 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5796267d-68c1-4f9f-bffe-0edaba3fa4d1" (UID: "5796267d-68c1-4f9f-bffe-0edaba3fa4d1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.221077 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5796267d-68c1-4f9f-bffe-0edaba3fa4d1" (UID: "5796267d-68c1-4f9f-bffe-0edaba3fa4d1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.224601 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-config" (OuterVolumeSpecName: "config") pod "5796267d-68c1-4f9f-bffe-0edaba3fa4d1" (UID: "5796267d-68c1-4f9f-bffe-0edaba3fa4d1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.232816 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5796267d-68c1-4f9f-bffe-0edaba3fa4d1" (UID: "5796267d-68c1-4f9f-bffe-0edaba3fa4d1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.240473 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "5796267d-68c1-4f9f-bffe-0edaba3fa4d1" (UID: "5796267d-68c1-4f9f-bffe-0edaba3fa4d1"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.245313 4687 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.245345 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.245355 4687 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.245363 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w5xzn\" (UniqueName: \"kubernetes.io/projected/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-kube-api-access-w5xzn\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.245374 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.245383 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5796267d-68c1-4f9f-bffe-0edaba3fa4d1-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.517763 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-qrhc5"] Nov 25 09:23:13 crc kubenswrapper[4687]: E1125 09:23:13.525261 4687 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Nov 25 09:23:13 crc kubenswrapper[4687]: E1125 09:23:13.525417 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db 
upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gxt6v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-8b7k2_openstack(5d974fcb-fbad-4f24-9857-a791205029a0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:23:13 crc kubenswrapper[4687]: E1125 09:23:13.526583 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-8b7k2" podUID="5d974fcb-fbad-4f24-9857-a791205029a0" Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.527432 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-qrhc5"] Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.744083 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5796267d-68c1-4f9f-bffe-0edaba3fa4d1" path="/var/lib/kubelet/pods/5796267d-68c1-4f9f-bffe-0edaba3fa4d1/volumes" Nov 25 09:23:13 crc kubenswrapper[4687]: I1125 09:23:13.804955 4687 scope.go:117] "RemoveContainer" containerID="995b3ce25f38495565ac59d33ebf10d399b911a1013b1b0ff5158232384136b7" Nov 25 09:23:14 crc kubenswrapper[4687]: E1125 09:23:14.192145 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-8b7k2" podUID="5d974fcb-fbad-4f24-9857-a791205029a0" Nov 25 09:23:14 crc kubenswrapper[4687]: I1125 09:23:14.233220 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-lvpd8"] Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.203708 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09662da0-b802-43c3-9c8e-4c9e951bdd7f","Type":"ContainerStarted","Data":"4b85b4fc296a4ea78929c1a8a57a40cfa5ce2c00d86d703f758a339b8331c829"} Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.214652 4687 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/horizon-7dc8446cb-d6wz7" event={"ID":"ea89f490-5a54-46eb-9b1c-1eb96dd181da","Type":"ContainerStarted","Data":"de6dd6a7ef780dbd15e6c80b510b13d7a645a96dcdbc08bfde45bbfa30791aeb"} Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.214701 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7dc8446cb-d6wz7" event={"ID":"ea89f490-5a54-46eb-9b1c-1eb96dd181da","Type":"ContainerStarted","Data":"104c046087fa3bf6c2aea654a6dd6e82a67ada49e25b5ccd82128193ef494c66"} Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.219897 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-gpc68" event={"ID":"7c2bb808-f45c-4126-94b9-36187402c9d7","Type":"ContainerStarted","Data":"6f4f84f88751424025dafc2832f22dc1a206b03fc6b76ff91fdac97f54082182"} Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.222966 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5b5c8fb8f9-zv6j6" event={"ID":"5df3d575-c461-43d1-af22-d9f15aaf06b7","Type":"ContainerStarted","Data":"17238923e26e3d2d96e4944b1bedd12f5c0810e0c9d223e23ed55444902e1ed6"} Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.223010 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5b5c8fb8f9-zv6j6" event={"ID":"5df3d575-c461-43d1-af22-d9f15aaf06b7","Type":"ContainerStarted","Data":"ea5c36b616c5e02c1869d3c22c1856893ebd5facf702a4e075dc9fcd64e5c0f7"} Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.223135 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5b5c8fb8f9-zv6j6" podUID="5df3d575-c461-43d1-af22-d9f15aaf06b7" containerName="horizon-log" containerID="cri-o://ea5c36b616c5e02c1869d3c22c1856893ebd5facf702a4e075dc9fcd64e5c0f7" gracePeriod=30 Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.228265 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5b5c8fb8f9-zv6j6" podUID="5df3d575-c461-43d1-af22-d9f15aaf06b7" containerName="horizon" containerID="cri-o://17238923e26e3d2d96e4944b1bedd12f5c0810e0c9d223e23ed55444902e1ed6" gracePeriod=30 Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.237610 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-7dc8446cb-d6wz7" podStartSLOduration=3.992632588 podStartE2EDuration="29.237581818s" podCreationTimestamp="2025-11-25 09:22:46 +0000 UTC" firstStartedPulling="2025-11-25 09:22:48.274461931 +0000 UTC m=+1163.328101649" lastFinishedPulling="2025-11-25 09:23:13.519411161 +0000 UTC m=+1188.573050879" observedRunningTime="2025-11-25 09:23:15.236076007 +0000 UTC m=+1190.289715725" watchObservedRunningTime="2025-11-25 09:23:15.237581818 +0000 UTC m=+1190.291221536" Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.283203 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b","Type":"ContainerStarted","Data":"8e9dc2fe748b29eb8444030972d323442905c30915811bcabf006b1aa63fa220"} Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.287215 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5788b95877-j7l5l" event={"ID":"41b80a26-d97f-4344-8489-cfa0dbbaf99f","Type":"ContainerStarted","Data":"efe8191e8265cb34dbd18530c62eb2172d1ee33cb120ba637460a782929b2bbc"} Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.287261 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/horizon-5788b95877-j7l5l" event={"ID":"41b80a26-d97f-4344-8489-cfa0dbbaf99f","Type":"ContainerStarted","Data":"488eb876ed48587f3b0483046e9f10f4d55e0a74e3a75c3081de304ea303c4ac"} Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.287393 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5788b95877-j7l5l" podUID="41b80a26-d97f-4344-8489-cfa0dbbaf99f" containerName="horizon-log" containerID="cri-o://488eb876ed48587f3b0483046e9f10f4d55e0a74e3a75c3081de304ea303c4ac" gracePeriod=30 Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.287838 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5788b95877-j7l5l" podUID="41b80a26-d97f-4344-8489-cfa0dbbaf99f" containerName="horizon" containerID="cri-o://efe8191e8265cb34dbd18530c62eb2172d1ee33cb120ba637460a782929b2bbc" gracePeriod=30 Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.299297 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-5b5c8fb8f9-zv6j6" podStartSLOduration=2.4708659 podStartE2EDuration="35.299272623s" podCreationTimestamp="2025-11-25 09:22:40 +0000 UTC" firstStartedPulling="2025-11-25 09:22:40.993433178 +0000 UTC m=+1156.047072896" lastFinishedPulling="2025-11-25 09:23:13.821839901 +0000 UTC m=+1188.875479619" observedRunningTime="2025-11-25 09:23:15.258453809 +0000 UTC m=+1190.312093537" watchObservedRunningTime="2025-11-25 09:23:15.299272623 +0000 UTC m=+1190.352912341" Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.303787 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"639af5c1-faaf-45e2-b75e-6031913ffdb9","Type":"ContainerStarted","Data":"48b59d8d8d563935a22d9a78a3ea38f4de261f20ec8276e35131232d4547777c"} Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.308715 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-gpc68" podStartSLOduration=3.326406794 podStartE2EDuration="37.308695681s" podCreationTimestamp="2025-11-25 09:22:38 +0000 UTC" firstStartedPulling="2025-11-25 09:22:39.522132674 +0000 UTC m=+1154.575772392" lastFinishedPulling="2025-11-25 09:23:13.504421561 +0000 UTC m=+1188.558061279" observedRunningTime="2025-11-25 09:23:15.279782131 +0000 UTC m=+1190.333421879" watchObservedRunningTime="2025-11-25 09:23:15.308695681 +0000 UTC m=+1190.362335399" Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.325171 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-86977dc76f-6cpw4" event={"ID":"d875a4e8-0091-41cb-9a53-2cd74bdf853c","Type":"ContainerStarted","Data":"e20539092e82e4acacb5393fe7a05f3acee326479837f8e4e07cce2cc47e24b2"} Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.325380 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-86977dc76f-6cpw4" event={"ID":"d875a4e8-0091-41cb-9a53-2cd74bdf853c","Type":"ContainerStarted","Data":"86a24efef2c514f47481598a270e082c67bdd7c83e5c9e1ef82328cb9e591d4d"} Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.325575 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-86977dc76f-6cpw4" podUID="d875a4e8-0091-41cb-9a53-2cd74bdf853c" containerName="horizon-log" containerID="cri-o://86a24efef2c514f47481598a270e082c67bdd7c83e5c9e1ef82328cb9e591d4d" gracePeriod=30 Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.325710 4687 kuberuntime_container.go:808] "Killing container 
with a grace period" pod="openstack/horizon-86977dc76f-6cpw4" podUID="d875a4e8-0091-41cb-9a53-2cd74bdf853c" containerName="horizon" containerID="cri-o://e20539092e82e4acacb5393fe7a05f3acee326479837f8e4e07cce2cc47e24b2" gracePeriod=30 Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.327378 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-5788b95877-j7l5l" podStartSLOduration=3.701215337 podStartE2EDuration="38.3273591s" podCreationTimestamp="2025-11-25 09:22:37 +0000 UTC" firstStartedPulling="2025-11-25 09:22:39.198406922 +0000 UTC m=+1154.252046640" lastFinishedPulling="2025-11-25 09:23:13.824550695 +0000 UTC m=+1188.878190403" observedRunningTime="2025-11-25 09:23:15.315254249 +0000 UTC m=+1190.368893967" watchObservedRunningTime="2025-11-25 09:23:15.3273591 +0000 UTC m=+1190.380998818" Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.332638 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-69b7bcc78d-r6t7q" event={"ID":"e4c7abdb-6d41-42c3-a228-27ebd825e7b5","Type":"ContainerStarted","Data":"9169b5cbdc76562e9a63807b2dcf221e9a5328109b8dd2e284a872ff2bfa538c"} Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.332699 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-69b7bcc78d-r6t7q" event={"ID":"e4c7abdb-6d41-42c3-a228-27ebd825e7b5","Type":"ContainerStarted","Data":"7f896d771d0d37693337bede44111caa599963877ae78af910061bb5ead1cd95"} Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.349442 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=27.349422073 podStartE2EDuration="27.349422073s" podCreationTimestamp="2025-11-25 09:22:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:23:15.336947962 +0000 UTC m=+1190.390587680" watchObservedRunningTime="2025-11-25 09:23:15.349422073 +0000 UTC m=+1190.403061781" Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.351022 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lvpd8" event={"ID":"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8","Type":"ContainerStarted","Data":"cb2dd5ea0c36e152c7d609cbb53dc52ee7945b0dcff8874679650e5b3a9342c0"} Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.351138 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lvpd8" event={"ID":"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8","Type":"ContainerStarted","Data":"481ef1e73e36aa4571cf96ee48fc7092e09a6030d08641d20c8837ebfb00c12d"} Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.359468 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-86977dc76f-6cpw4" podStartSLOduration=3.359157637 podStartE2EDuration="37.359455537s" podCreationTimestamp="2025-11-25 09:22:38 +0000 UTC" firstStartedPulling="2025-11-25 09:22:39.504972775 +0000 UTC m=+1154.558612493" lastFinishedPulling="2025-11-25 09:23:13.505270675 +0000 UTC m=+1188.558910393" observedRunningTime="2025-11-25 09:23:15.358041148 +0000 UTC m=+1190.411680866" watchObservedRunningTime="2025-11-25 09:23:15.359455537 +0000 UTC m=+1190.413095255" Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.395311 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-69b7bcc78d-r6t7q" podStartSLOduration=3.66431455 podStartE2EDuration="29.395286776s" 
podCreationTimestamp="2025-11-25 09:22:46 +0000 UTC" firstStartedPulling="2025-11-25 09:22:48.083638318 +0000 UTC m=+1163.137278036" lastFinishedPulling="2025-11-25 09:23:13.814610504 +0000 UTC m=+1188.868250262" observedRunningTime="2025-11-25 09:23:15.387097952 +0000 UTC m=+1190.440737680" watchObservedRunningTime="2025-11-25 09:23:15.395286776 +0000 UTC m=+1190.448926494" Nov 25 09:23:15 crc kubenswrapper[4687]: I1125 09:23:15.411765 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-lvpd8" podStartSLOduration=4.411743485 podStartE2EDuration="4.411743485s" podCreationTimestamp="2025-11-25 09:23:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:23:15.411171209 +0000 UTC m=+1190.464810927" watchObservedRunningTime="2025-11-25 09:23:15.411743485 +0000 UTC m=+1190.465383203" Nov 25 09:23:16 crc kubenswrapper[4687]: I1125 09:23:16.364601 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b","Type":"ContainerStarted","Data":"bc8e5baa0138b8324cec2648c0681f3aedf3d15ce1d321f2e6bba60401d4e86a"} Nov 25 09:23:16 crc kubenswrapper[4687]: I1125 09:23:16.370190 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"639af5c1-faaf-45e2-b75e-6031913ffdb9","Type":"ContainerStarted","Data":"37dcb870cd9cd737b8534e6422b914527b13739fb776e77cac1c81666563b1ff"} Nov 25 09:23:16 crc kubenswrapper[4687]: I1125 09:23:16.397886 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=28.397860609 podStartE2EDuration="28.397860609s" podCreationTimestamp="2025-11-25 09:22:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:23:16.390040135 +0000 UTC m=+1191.443679863" watchObservedRunningTime="2025-11-25 09:23:16.397860609 +0000 UTC m=+1191.451500327" Nov 25 09:23:16 crc kubenswrapper[4687]: I1125 09:23:16.994784 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5f59b8f679-qrhc5" podUID="5796267d-68c1-4f9f-bffe-0edaba3fa4d1" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.125:5353: i/o timeout" Nov 25 09:23:17 crc kubenswrapper[4687]: I1125 09:23:17.382486 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:23:17 crc kubenswrapper[4687]: I1125 09:23:17.382536 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:23:17 crc kubenswrapper[4687]: I1125 09:23:17.388069 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:23:17 crc kubenswrapper[4687]: I1125 09:23:17.389652 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-69b7bcc78d-r6t7q" Nov 25 09:23:18 crc kubenswrapper[4687]: I1125 09:23:18.276692 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5788b95877-j7l5l" Nov 25 09:23:18 crc kubenswrapper[4687]: I1125 09:23:18.625800 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 09:23:18 crc kubenswrapper[4687]: 
Nov 25 09:23:18 crc kubenswrapper[4687]: I1125 09:23:18.627312 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 25 09:23:18 crc kubenswrapper[4687]: I1125 09:23:18.627436 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 25 09:23:18 crc kubenswrapper[4687]: I1125 09:23:18.627648 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 25 09:23:18 crc kubenswrapper[4687]: I1125 09:23:18.662853 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 25 09:23:18 crc kubenswrapper[4687]: I1125 09:23:18.678369 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 25 09:23:18 crc kubenswrapper[4687]: I1125 09:23:18.678425 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 25 09:23:18 crc kubenswrapper[4687]: I1125 09:23:18.678440 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 25 09:23:18 crc kubenswrapper[4687]: I1125 09:23:18.678773 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 25 09:23:18 crc kubenswrapper[4687]: I1125 09:23:18.680560 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 25 09:23:18 crc kubenswrapper[4687]: I1125 09:23:18.733240 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 25 09:23:18 crc kubenswrapper[4687]: I1125 09:23:18.736864 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 25 09:23:18 crc kubenswrapper[4687]: I1125 09:23:18.761390 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-86977dc76f-6cpw4"
Nov 25 09:23:20 crc kubenswrapper[4687]: I1125 09:23:20.503217 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5b5c8fb8f9-zv6j6"
Nov 25 09:23:22 crc kubenswrapper[4687]: I1125 09:23:22.446049 4687 generic.go:334] "Generic (PLEG): container finished" podID="baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8" containerID="cb2dd5ea0c36e152c7d609cbb53dc52ee7945b0dcff8874679650e5b3a9342c0" exitCode=0
Nov 25 09:23:22 crc kubenswrapper[4687]: I1125 09:23:22.446286 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lvpd8" event={"ID":"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8","Type":"ContainerDied","Data":"cb2dd5ea0c36e152c7d609cbb53dc52ee7945b0dcff8874679650e5b3a9342c0"}
Nov 25 09:23:22 crc kubenswrapper[4687]: I1125 09:23:22.830999 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 25 09:23:22 crc kubenswrapper[4687]: I1125 09:23:22.908704 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 25 09:23:22 crc kubenswrapper[4687]: I1125 09:23:22.908848 4687 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
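The probe entries above show the usual gating order for the two glance pods: the startup probe flaps unhealthy until it first succeeds ("started"), and only after that do readiness results start to matter, flipping from the empty status to "ready" a few seconds later. A toy model of that gate, assuming only the ordering visible in the log (an illustration, not the kubelet prober):

package main

import "fmt"

// gate mimics the progression above: readiness results are only
// honored once the startup probe has reported started.
type gate struct {
	started, ready bool
}

func (g *gate) onStartup(ok bool) {
	if ok {
		g.started = true
	}
}

func (g *gate) onReadiness(ok bool) {
	if g.started {
		g.ready = ok
	}
}

func main() {
	g := &gate{}
	g.onStartup(false) // probe="startup" status="unhealthy"
	g.onReadiness(true)
	fmt.Println(g.ready) // false: readiness ignored before startup succeeds
	g.onStartup(true)    // probe="startup" status="started"
	g.onReadiness(true)  // probe="readiness" status="ready"
	fmt.Println(g.ready) // true
}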
containerID="6f4f84f88751424025dafc2832f22dc1a206b03fc6b76ff91fdac97f54082182" exitCode=0 Nov 25 09:23:23 crc kubenswrapper[4687]: I1125 09:23:23.467352 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-gpc68" event={"ID":"7c2bb808-f45c-4126-94b9-36187402c9d7","Type":"ContainerDied","Data":"6f4f84f88751424025dafc2832f22dc1a206b03fc6b76ff91fdac97f54082182"} Nov 25 09:23:23 crc kubenswrapper[4687]: I1125 09:23:23.598060 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 09:23:23 crc kubenswrapper[4687]: I1125 09:23:23.844375 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:23:23 crc kubenswrapper[4687]: I1125 09:23:23.844438 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:23:24 crc kubenswrapper[4687]: I1125 09:23:24.092858 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 09:23:24 crc kubenswrapper[4687]: I1125 09:23:24.487897 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-765t4" event={"ID":"7bb5f689-fd43-4fa3-b5a9-6603155ff184","Type":"ContainerDied","Data":"8dc9ece059db60550aa80335a9b974c0c67756ac0fb2b73973e878346866870b"} Nov 25 09:23:24 crc kubenswrapper[4687]: I1125 09:23:24.488010 4687 generic.go:334] "Generic (PLEG): container finished" podID="7bb5f689-fd43-4fa3-b5a9-6603155ff184" containerID="8dc9ece059db60550aa80335a9b974c0c67756ac0fb2b73973e878346866870b" exitCode=0 Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.146446 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-765t4" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.165842 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.195926 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-gpc68" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.225767 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c2bb808-f45c-4126-94b9-36187402c9d7-scripts\") pod \"7c2bb808-f45c-4126-94b9-36187402c9d7\" (UID: \"7c2bb808-f45c-4126-94b9-36187402c9d7\") " Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.225805 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-config-data\") pod \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.225829 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7bb5f689-fd43-4fa3-b5a9-6603155ff184-config\") pod \"7bb5f689-fd43-4fa3-b5a9-6603155ff184\" (UID: \"7bb5f689-fd43-4fa3-b5a9-6603155ff184\") " Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.225862 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-credential-keys\") pod \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.225911 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-scripts\") pod \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.225935 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mm6kk\" (UniqueName: \"kubernetes.io/projected/7c2bb808-f45c-4126-94b9-36187402c9d7-kube-api-access-mm6kk\") pod \"7c2bb808-f45c-4126-94b9-36187402c9d7\" (UID: \"7c2bb808-f45c-4126-94b9-36187402c9d7\") " Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.225987 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7c2bb808-f45c-4126-94b9-36187402c9d7-logs\") pod \"7c2bb808-f45c-4126-94b9-36187402c9d7\" (UID: \"7c2bb808-f45c-4126-94b9-36187402c9d7\") " Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.226011 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-clbh2\" (UniqueName: \"kubernetes.io/projected/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-kube-api-access-clbh2\") pod \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.226032 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-fernet-keys\") pod \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.226049 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c2bb808-f45c-4126-94b9-36187402c9d7-combined-ca-bundle\") pod \"7c2bb808-f45c-4126-94b9-36187402c9d7\" (UID: \"7c2bb808-f45c-4126-94b9-36187402c9d7\") " Nov 25 
09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.226083 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-combined-ca-bundle\") pod \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\" (UID: \"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8\") " Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.226109 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nlffs\" (UniqueName: \"kubernetes.io/projected/7bb5f689-fd43-4fa3-b5a9-6603155ff184-kube-api-access-nlffs\") pod \"7bb5f689-fd43-4fa3-b5a9-6603155ff184\" (UID: \"7bb5f689-fd43-4fa3-b5a9-6603155ff184\") " Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.226166 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c2bb808-f45c-4126-94b9-36187402c9d7-config-data\") pod \"7c2bb808-f45c-4126-94b9-36187402c9d7\" (UID: \"7c2bb808-f45c-4126-94b9-36187402c9d7\") " Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.226187 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bb5f689-fd43-4fa3-b5a9-6603155ff184-combined-ca-bundle\") pod \"7bb5f689-fd43-4fa3-b5a9-6603155ff184\" (UID: \"7bb5f689-fd43-4fa3-b5a9-6603155ff184\") " Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.227406 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c2bb808-f45c-4126-94b9-36187402c9d7-logs" (OuterVolumeSpecName: "logs") pod "7c2bb808-f45c-4126-94b9-36187402c9d7" (UID: "7c2bb808-f45c-4126-94b9-36187402c9d7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.249403 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-scripts" (OuterVolumeSpecName: "scripts") pod "baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8" (UID: "baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.249945 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-kube-api-access-clbh2" (OuterVolumeSpecName: "kube-api-access-clbh2") pod "baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8" (UID: "baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8"). InnerVolumeSpecName "kube-api-access-clbh2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.254738 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c2bb808-f45c-4126-94b9-36187402c9d7-scripts" (OuterVolumeSpecName: "scripts") pod "7c2bb808-f45c-4126-94b9-36187402c9d7" (UID: "7c2bb808-f45c-4126-94b9-36187402c9d7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.261994 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8" (UID: "baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.262073 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c2bb808-f45c-4126-94b9-36187402c9d7-kube-api-access-mm6kk" (OuterVolumeSpecName: "kube-api-access-mm6kk") pod "7c2bb808-f45c-4126-94b9-36187402c9d7" (UID: "7c2bb808-f45c-4126-94b9-36187402c9d7"). InnerVolumeSpecName "kube-api-access-mm6kk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.262761 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb5f689-fd43-4fa3-b5a9-6603155ff184-kube-api-access-nlffs" (OuterVolumeSpecName: "kube-api-access-nlffs") pod "7bb5f689-fd43-4fa3-b5a9-6603155ff184" (UID: "7bb5f689-fd43-4fa3-b5a9-6603155ff184"). InnerVolumeSpecName "kube-api-access-nlffs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.279728 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8" (UID: "baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.291629 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bb5f689-fd43-4fa3-b5a9-6603155ff184-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7bb5f689-fd43-4fa3-b5a9-6603155ff184" (UID: "7bb5f689-fd43-4fa3-b5a9-6603155ff184"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.315844 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bb5f689-fd43-4fa3-b5a9-6603155ff184-config" (OuterVolumeSpecName: "config") pod "7bb5f689-fd43-4fa3-b5a9-6603155ff184" (UID: "7bb5f689-fd43-4fa3-b5a9-6603155ff184"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.316665 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8" (UID: "baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.318899 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c2bb808-f45c-4126-94b9-36187402c9d7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7c2bb808-f45c-4126-94b9-36187402c9d7" (UID: "7c2bb808-f45c-4126-94b9-36187402c9d7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.327889 4687 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7c2bb808-f45c-4126-94b9-36187402c9d7-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.327939 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-clbh2\" (UniqueName: \"kubernetes.io/projected/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-kube-api-access-clbh2\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.327958 4687 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.327970 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c2bb808-f45c-4126-94b9-36187402c9d7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.327981 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.328000 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nlffs\" (UniqueName: \"kubernetes.io/projected/7bb5f689-fd43-4fa3-b5a9-6603155ff184-kube-api-access-nlffs\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.328010 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bb5f689-fd43-4fa3-b5a9-6603155ff184-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.328020 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c2bb808-f45c-4126-94b9-36187402c9d7-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.328032 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/7bb5f689-fd43-4fa3-b5a9-6603155ff184-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.328043 4687 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.328054 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.328065 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mm6kk\" (UniqueName: \"kubernetes.io/projected/7c2bb808-f45c-4126-94b9-36187402c9d7-kube-api-access-mm6kk\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.334026 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c2bb808-f45c-4126-94b9-36187402c9d7-config-data" (OuterVolumeSpecName: "config-data") pod "7c2bb808-f45c-4126-94b9-36187402c9d7" (UID: 
"7c2bb808-f45c-4126-94b9-36187402c9d7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.341842 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-config-data" (OuterVolumeSpecName: "config-data") pod "baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8" (UID: "baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.429488 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.430404 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c2bb808-f45c-4126-94b9-36187402c9d7-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.513385 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-gpc68" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.513416 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-gpc68" event={"ID":"7c2bb808-f45c-4126-94b9-36187402c9d7","Type":"ContainerDied","Data":"548b3a99d23f3a3c278f9a75dd4289ae55a0e9cdca89666b2ffd6c7cd199df20"} Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.513807 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="548b3a99d23f3a3c278f9a75dd4289ae55a0e9cdca89666b2ffd6c7cd199df20" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.515788 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-765t4" event={"ID":"7bb5f689-fd43-4fa3-b5a9-6603155ff184","Type":"ContainerDied","Data":"838c083fd028fd80789902ec20b3b738f2944c954f5cfbc708fd984ea08a866c"} Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.515829 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="838c083fd028fd80789902ec20b3b738f2944c954f5cfbc708fd984ea08a866c" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.515898 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-765t4" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.521318 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09662da0-b802-43c3-9c8e-4c9e951bdd7f","Type":"ContainerStarted","Data":"55641debda0cfb9cd4ff16ce9abff92b66ae9a3061601c32b13947c28897ed00"} Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.529827 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lvpd8" event={"ID":"baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8","Type":"ContainerDied","Data":"481ef1e73e36aa4571cf96ee48fc7092e09a6030d08641d20c8837ebfb00c12d"} Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.529869 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="481ef1e73e36aa4571cf96ee48fc7092e09a6030d08641d20c8837ebfb00c12d" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.529935 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-lvpd8" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.778466 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-4q4h4"] Nov 25 09:23:26 crc kubenswrapper[4687]: E1125 09:23:26.778854 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c2bb808-f45c-4126-94b9-36187402c9d7" containerName="placement-db-sync" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.778872 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c2bb808-f45c-4126-94b9-36187402c9d7" containerName="placement-db-sync" Nov 25 09:23:26 crc kubenswrapper[4687]: E1125 09:23:26.778882 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bb5f689-fd43-4fa3-b5a9-6603155ff184" containerName="neutron-db-sync" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.778889 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bb5f689-fd43-4fa3-b5a9-6603155ff184" containerName="neutron-db-sync" Nov 25 09:23:26 crc kubenswrapper[4687]: E1125 09:23:26.778921 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5796267d-68c1-4f9f-bffe-0edaba3fa4d1" containerName="init" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.778927 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="5796267d-68c1-4f9f-bffe-0edaba3fa4d1" containerName="init" Nov 25 09:23:26 crc kubenswrapper[4687]: E1125 09:23:26.778942 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5796267d-68c1-4f9f-bffe-0edaba3fa4d1" containerName="dnsmasq-dns" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.778947 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="5796267d-68c1-4f9f-bffe-0edaba3fa4d1" containerName="dnsmasq-dns" Nov 25 09:23:26 crc kubenswrapper[4687]: E1125 09:23:26.778956 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8" containerName="keystone-bootstrap" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.778961 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8" containerName="keystone-bootstrap" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.779157 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="5796267d-68c1-4f9f-bffe-0edaba3fa4d1" containerName="dnsmasq-dns" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.779168 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bb5f689-fd43-4fa3-b5a9-6603155ff184" containerName="neutron-db-sync" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.779179 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8" containerName="keystone-bootstrap" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.779193 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c2bb808-f45c-4126-94b9-36187402c9d7" containerName="placement-db-sync" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.780097 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.848945 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-4q4h4"] Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.949972 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llrgt\" (UniqueName: \"kubernetes.io/projected/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-kube-api-access-llrgt\") pod \"dnsmasq-dns-6b7b667979-4q4h4\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") " pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.950401 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-config\") pod \"dnsmasq-dns-6b7b667979-4q4h4\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") " pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.950581 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-dns-svc\") pod \"dnsmasq-dns-6b7b667979-4q4h4\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") " pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.950657 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-4q4h4\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") " pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.950701 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-4q4h4\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") " pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.950739 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-4q4h4\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") " pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.958988 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-79bbc45dbd-qv76b"] Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.961068 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-79bbc45dbd-qv76b" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.965088 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.965287 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.965480 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-2rdqk" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.965783 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 09:23:26 crc kubenswrapper[4687]: I1125 09:23:26.988216 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-79bbc45dbd-qv76b"] Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.052543 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-dns-svc\") pod \"dnsmasq-dns-6b7b667979-4q4h4\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") " pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.052630 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-4q4h4\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") " pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.053845 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-dns-svc\") pod \"dnsmasq-dns-6b7b667979-4q4h4\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") " pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.053861 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-4q4h4\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") " pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.053931 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-4q4h4\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") " pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.053968 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-4q4h4\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") " pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.054024 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llrgt\" (UniqueName: \"kubernetes.io/projected/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-kube-api-access-llrgt\") pod \"dnsmasq-dns-6b7b667979-4q4h4\" (UID: 
\"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") " pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.054051 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-config\") pod \"dnsmasq-dns-6b7b667979-4q4h4\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") " pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.054805 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-config\") pod \"dnsmasq-dns-6b7b667979-4q4h4\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") " pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.055382 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-4q4h4\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") " pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.055980 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-4q4h4\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") " pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.087008 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llrgt\" (UniqueName: \"kubernetes.io/projected/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-kube-api-access-llrgt\") pod \"dnsmasq-dns-6b7b667979-4q4h4\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") " pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.122942 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.159067 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76ph9\" (UniqueName: \"kubernetes.io/projected/b919f6e7-2668-404e-b839-5173deb3824d-kube-api-access-76ph9\") pod \"neutron-79bbc45dbd-qv76b\" (UID: \"b919f6e7-2668-404e-b839-5173deb3824d\") " pod="openstack/neutron-79bbc45dbd-qv76b" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.159180 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-httpd-config\") pod \"neutron-79bbc45dbd-qv76b\" (UID: \"b919f6e7-2668-404e-b839-5173deb3824d\") " pod="openstack/neutron-79bbc45dbd-qv76b" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.159205 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-ovndb-tls-certs\") pod \"neutron-79bbc45dbd-qv76b\" (UID: \"b919f6e7-2668-404e-b839-5173deb3824d\") " pod="openstack/neutron-79bbc45dbd-qv76b" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.159234 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-combined-ca-bundle\") pod \"neutron-79bbc45dbd-qv76b\" (UID: \"b919f6e7-2668-404e-b839-5173deb3824d\") " pod="openstack/neutron-79bbc45dbd-qv76b" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.159281 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-config\") pod \"neutron-79bbc45dbd-qv76b\" (UID: \"b919f6e7-2668-404e-b839-5173deb3824d\") " pod="openstack/neutron-79bbc45dbd-qv76b" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.260535 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-config\") pod \"neutron-79bbc45dbd-qv76b\" (UID: \"b919f6e7-2668-404e-b839-5173deb3824d\") " pod="openstack/neutron-79bbc45dbd-qv76b" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.260676 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76ph9\" (UniqueName: \"kubernetes.io/projected/b919f6e7-2668-404e-b839-5173deb3824d-kube-api-access-76ph9\") pod \"neutron-79bbc45dbd-qv76b\" (UID: \"b919f6e7-2668-404e-b839-5173deb3824d\") " pod="openstack/neutron-79bbc45dbd-qv76b" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.260738 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-httpd-config\") pod \"neutron-79bbc45dbd-qv76b\" (UID: \"b919f6e7-2668-404e-b839-5173deb3824d\") " pod="openstack/neutron-79bbc45dbd-qv76b" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.260762 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-ovndb-tls-certs\") pod \"neutron-79bbc45dbd-qv76b\" (UID: \"b919f6e7-2668-404e-b839-5173deb3824d\") " 
pod="openstack/neutron-79bbc45dbd-qv76b" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.260781 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-combined-ca-bundle\") pod \"neutron-79bbc45dbd-qv76b\" (UID: \"b919f6e7-2668-404e-b839-5173deb3824d\") " pod="openstack/neutron-79bbc45dbd-qv76b" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.268328 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-httpd-config\") pod \"neutron-79bbc45dbd-qv76b\" (UID: \"b919f6e7-2668-404e-b839-5173deb3824d\") " pod="openstack/neutron-79bbc45dbd-qv76b" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.272286 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-ovndb-tls-certs\") pod \"neutron-79bbc45dbd-qv76b\" (UID: \"b919f6e7-2668-404e-b839-5173deb3824d\") " pod="openstack/neutron-79bbc45dbd-qv76b" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.272355 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-combined-ca-bundle\") pod \"neutron-79bbc45dbd-qv76b\" (UID: \"b919f6e7-2668-404e-b839-5173deb3824d\") " pod="openstack/neutron-79bbc45dbd-qv76b" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.284173 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76ph9\" (UniqueName: \"kubernetes.io/projected/b919f6e7-2668-404e-b839-5173deb3824d-kube-api-access-76ph9\") pod \"neutron-79bbc45dbd-qv76b\" (UID: \"b919f6e7-2668-404e-b839-5173deb3824d\") " pod="openstack/neutron-79bbc45dbd-qv76b" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.284655 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-config\") pod \"neutron-79bbc45dbd-qv76b\" (UID: \"b919f6e7-2668-404e-b839-5173deb3824d\") " pod="openstack/neutron-79bbc45dbd-qv76b" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.327923 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-79bbc45dbd-qv76b" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.392642 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7dc8446cb-d6wz7" podUID="ea89f490-5a54-46eb-9b1c-1eb96dd181da" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.146:8443: connect: connection refused" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.392701 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-69b7bcc78d-r6t7q" podUID="e4c7abdb-6d41-42c3-a228-27ebd825e7b5" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.147:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.147:8443: connect: connection refused" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.399561 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-5bf794b984-bbcp5"] Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.426021 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.432173 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.432405 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.432562 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-htp9p" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.432722 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.432861 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.473464 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5bf794b984-bbcp5"] Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.479085 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca801008-2024-4b8d-a69b-2f468a78f1a1-config-data\") pod \"placement-5bf794b984-bbcp5\" (UID: \"ca801008-2024-4b8d-a69b-2f468a78f1a1\") " pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.479196 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ca801008-2024-4b8d-a69b-2f468a78f1a1-internal-tls-certs\") pod \"placement-5bf794b984-bbcp5\" (UID: \"ca801008-2024-4b8d-a69b-2f468a78f1a1\") " pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.479282 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ca801008-2024-4b8d-a69b-2f468a78f1a1-logs\") pod \"placement-5bf794b984-bbcp5\" (UID: \"ca801008-2024-4b8d-a69b-2f468a78f1a1\") " pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.479347 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ca801008-2024-4b8d-a69b-2f468a78f1a1-public-tls-certs\") pod \"placement-5bf794b984-bbcp5\" (UID: \"ca801008-2024-4b8d-a69b-2f468a78f1a1\") " pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.479396 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca801008-2024-4b8d-a69b-2f468a78f1a1-combined-ca-bundle\") pod \"placement-5bf794b984-bbcp5\" (UID: \"ca801008-2024-4b8d-a69b-2f468a78f1a1\") " pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.479465 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca801008-2024-4b8d-a69b-2f468a78f1a1-scripts\") pod \"placement-5bf794b984-bbcp5\" (UID: \"ca801008-2024-4b8d-a69b-2f468a78f1a1\") " pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.479740 4687 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rk9z8\" (UniqueName: \"kubernetes.io/projected/ca801008-2024-4b8d-a69b-2f468a78f1a1-kube-api-access-rk9z8\") pod \"placement-5bf794b984-bbcp5\" (UID: \"ca801008-2024-4b8d-a69b-2f468a78f1a1\") " pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.518658 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-6cd548ffc8-p78fk"] Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.539253 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.539902 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6cd548ffc8-p78fk"] Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.547143 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.547651 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.548085 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-l777p" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.548223 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.548337 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.548460 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.604340 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca801008-2024-4b8d-a69b-2f468a78f1a1-config-data\") pod \"placement-5bf794b984-bbcp5\" (UID: \"ca801008-2024-4b8d-a69b-2f468a78f1a1\") " pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.604665 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ca801008-2024-4b8d-a69b-2f468a78f1a1-internal-tls-certs\") pod \"placement-5bf794b984-bbcp5\" (UID: \"ca801008-2024-4b8d-a69b-2f468a78f1a1\") " pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.604826 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ca801008-2024-4b8d-a69b-2f468a78f1a1-logs\") pod \"placement-5bf794b984-bbcp5\" (UID: \"ca801008-2024-4b8d-a69b-2f468a78f1a1\") " pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.604967 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ca801008-2024-4b8d-a69b-2f468a78f1a1-public-tls-certs\") pod \"placement-5bf794b984-bbcp5\" (UID: \"ca801008-2024-4b8d-a69b-2f468a78f1a1\") " pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.605128 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/ca801008-2024-4b8d-a69b-2f468a78f1a1-combined-ca-bundle\") pod \"placement-5bf794b984-bbcp5\" (UID: \"ca801008-2024-4b8d-a69b-2f468a78f1a1\") " pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.605267 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca801008-2024-4b8d-a69b-2f468a78f1a1-scripts\") pod \"placement-5bf794b984-bbcp5\" (UID: \"ca801008-2024-4b8d-a69b-2f468a78f1a1\") " pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.605400 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rk9z8\" (UniqueName: \"kubernetes.io/projected/ca801008-2024-4b8d-a69b-2f468a78f1a1-kube-api-access-rk9z8\") pod \"placement-5bf794b984-bbcp5\" (UID: \"ca801008-2024-4b8d-a69b-2f468a78f1a1\") " pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.610984 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ca801008-2024-4b8d-a69b-2f468a78f1a1-logs\") pod \"placement-5bf794b984-bbcp5\" (UID: \"ca801008-2024-4b8d-a69b-2f468a78f1a1\") " pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.629677 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ca801008-2024-4b8d-a69b-2f468a78f1a1-public-tls-certs\") pod \"placement-5bf794b984-bbcp5\" (UID: \"ca801008-2024-4b8d-a69b-2f468a78f1a1\") " pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.633476 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca801008-2024-4b8d-a69b-2f468a78f1a1-config-data\") pod \"placement-5bf794b984-bbcp5\" (UID: \"ca801008-2024-4b8d-a69b-2f468a78f1a1\") " pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.656554 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-4q4h4"] Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.664813 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca801008-2024-4b8d-a69b-2f468a78f1a1-scripts\") pod \"placement-5bf794b984-bbcp5\" (UID: \"ca801008-2024-4b8d-a69b-2f468a78f1a1\") " pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.664855 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ca801008-2024-4b8d-a69b-2f468a78f1a1-internal-tls-certs\") pod \"placement-5bf794b984-bbcp5\" (UID: \"ca801008-2024-4b8d-a69b-2f468a78f1a1\") " pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.664907 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca801008-2024-4b8d-a69b-2f468a78f1a1-combined-ca-bundle\") pod \"placement-5bf794b984-bbcp5\" (UID: \"ca801008-2024-4b8d-a69b-2f468a78f1a1\") " pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.664947 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rk9z8\" (UniqueName: 
\"kubernetes.io/projected/ca801008-2024-4b8d-a69b-2f468a78f1a1-kube-api-access-rk9z8\") pod \"placement-5bf794b984-bbcp5\" (UID: \"ca801008-2024-4b8d-a69b-2f468a78f1a1\") " pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.673785 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-2h86j" event={"ID":"53681654-97b6-4586-ba53-8b6b018e04fa","Type":"ContainerStarted","Data":"24477c3d7537d4689d99b2da56ecda859836de0fecd9930e4fefb8d153aff3f5"} Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.708230 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8e2361f-7cd6-4055-8e0d-a53eda846c23-public-tls-certs\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.708315 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f8e2361f-7cd6-4055-8e0d-a53eda846c23-fernet-keys\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.708421 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8e2361f-7cd6-4055-8e0d-a53eda846c23-config-data\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.708770 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlw2p\" (UniqueName: \"kubernetes.io/projected/f8e2361f-7cd6-4055-8e0d-a53eda846c23-kube-api-access-jlw2p\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.708819 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8e2361f-7cd6-4055-8e0d-a53eda846c23-combined-ca-bundle\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.708986 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8e2361f-7cd6-4055-8e0d-a53eda846c23-internal-tls-certs\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.709105 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8e2361f-7cd6-4055-8e0d-a53eda846c23-scripts\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.709131 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: 
\"kubernetes.io/secret/f8e2361f-7cd6-4055-8e0d-a53eda846c23-credential-keys\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.803436 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.810254 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8e2361f-7cd6-4055-8e0d-a53eda846c23-scripts\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.810308 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f8e2361f-7cd6-4055-8e0d-a53eda846c23-credential-keys\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.810362 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8e2361f-7cd6-4055-8e0d-a53eda846c23-public-tls-certs\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.810387 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f8e2361f-7cd6-4055-8e0d-a53eda846c23-fernet-keys\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.810409 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8e2361f-7cd6-4055-8e0d-a53eda846c23-config-data\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.810483 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlw2p\" (UniqueName: \"kubernetes.io/projected/f8e2361f-7cd6-4055-8e0d-a53eda846c23-kube-api-access-jlw2p\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.810518 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8e2361f-7cd6-4055-8e0d-a53eda846c23-combined-ca-bundle\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.810545 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8e2361f-7cd6-4055-8e0d-a53eda846c23-internal-tls-certs\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.821434 4687 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8e2361f-7cd6-4055-8e0d-a53eda846c23-internal-tls-certs\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.822672 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f8e2361f-7cd6-4055-8e0d-a53eda846c23-credential-keys\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.823471 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8e2361f-7cd6-4055-8e0d-a53eda846c23-config-data\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.832837 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f8e2361f-7cd6-4055-8e0d-a53eda846c23-fernet-keys\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.833159 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8e2361f-7cd6-4055-8e0d-a53eda846c23-scripts\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.833756 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8e2361f-7cd6-4055-8e0d-a53eda846c23-combined-ca-bundle\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.837932 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8e2361f-7cd6-4055-8e0d-a53eda846c23-public-tls-certs\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.842140 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlw2p\" (UniqueName: \"kubernetes.io/projected/f8e2361f-7cd6-4055-8e0d-a53eda846c23-kube-api-access-jlw2p\") pod \"keystone-6cd548ffc8-p78fk\" (UID: \"f8e2361f-7cd6-4055-8e0d-a53eda846c23\") " pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:27 crc kubenswrapper[4687]: I1125 09:23:27.966048 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:28 crc kubenswrapper[4687]: I1125 09:23:28.360550 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-2h86j" podStartSLOduration=4.282317629 podStartE2EDuration="51.360532628s" podCreationTimestamp="2025-11-25 09:22:37 +0000 UTC" firstStartedPulling="2025-11-25 09:22:39.122330394 +0000 UTC m=+1154.175970112" lastFinishedPulling="2025-11-25 09:23:26.200545393 +0000 UTC m=+1201.254185111" observedRunningTime="2025-11-25 09:23:27.694324762 +0000 UTC m=+1202.747964480" watchObservedRunningTime="2025-11-25 09:23:28.360532628 +0000 UTC m=+1203.414172346" Nov 25 09:23:28 crc kubenswrapper[4687]: I1125 09:23:28.375233 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-79bbc45dbd-qv76b"] Nov 25 09:23:28 crc kubenswrapper[4687]: I1125 09:23:28.463627 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5bf794b984-bbcp5"] Nov 25 09:23:28 crc kubenswrapper[4687]: I1125 09:23:28.569675 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6cd548ffc8-p78fk"] Nov 25 09:23:28 crc kubenswrapper[4687]: I1125 09:23:28.685548 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-8b7k2" event={"ID":"5d974fcb-fbad-4f24-9857-a791205029a0","Type":"ContainerStarted","Data":"120c8e9dd480db83c22337cce02f25f60cda56a443c5e55303820d8ae6e84ea3"} Nov 25 09:23:28 crc kubenswrapper[4687]: I1125 09:23:28.699570 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6cd548ffc8-p78fk" event={"ID":"f8e2361f-7cd6-4055-8e0d-a53eda846c23","Type":"ContainerStarted","Data":"0b994b0275f8e56e7d240606028a1781311c1e1c7b9a8ba5c08603f3dfe1e6f4"} Nov 25 09:23:28 crc kubenswrapper[4687]: I1125 09:23:28.706231 4687 generic.go:334] "Generic (PLEG): container finished" podID="b62e6ed0-d60b-46ea-b90e-a299f8b980cc" containerID="9a8c18c38bea044113284adf7d183b3031ec16a48097337185a937b5f8d679a4" exitCode=0 Nov 25 09:23:28 crc kubenswrapper[4687]: I1125 09:23:28.707035 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" event={"ID":"b62e6ed0-d60b-46ea-b90e-a299f8b980cc","Type":"ContainerDied","Data":"9a8c18c38bea044113284adf7d183b3031ec16a48097337185a937b5f8d679a4"} Nov 25 09:23:28 crc kubenswrapper[4687]: I1125 09:23:28.707067 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" event={"ID":"b62e6ed0-d60b-46ea-b90e-a299f8b980cc","Type":"ContainerStarted","Data":"63b81874f33ee900b0d99035e2a18ca597f92592576d2968be68ce9e9d162e2a"} Nov 25 09:23:28 crc kubenswrapper[4687]: I1125 09:23:28.709295 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-8b7k2" podStartSLOduration=3.226875595 podStartE2EDuration="50.709278733s" podCreationTimestamp="2025-11-25 09:22:38 +0000 UTC" firstStartedPulling="2025-11-25 09:22:39.828383198 +0000 UTC m=+1154.882022916" lastFinishedPulling="2025-11-25 09:23:27.310786336 +0000 UTC m=+1202.364426054" observedRunningTime="2025-11-25 09:23:28.704858161 +0000 UTC m=+1203.758497879" watchObservedRunningTime="2025-11-25 09:23:28.709278733 +0000 UTC m=+1203.762918451" Nov 25 09:23:28 crc kubenswrapper[4687]: I1125 09:23:28.715880 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-79bbc45dbd-qv76b" 
event={"ID":"b919f6e7-2668-404e-b839-5173deb3824d","Type":"ContainerStarted","Data":"6ed594cb7d51a09e95bfec0f4a285571f5700a688a94ae1590c6c40210994234"} Nov 25 09:23:28 crc kubenswrapper[4687]: I1125 09:23:28.723681 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5bf794b984-bbcp5" event={"ID":"ca801008-2024-4b8d-a69b-2f468a78f1a1","Type":"ContainerStarted","Data":"425f6020293cc4fe5971ce8ede8a94c96ab286adba987dd9cf85e114bfc876fb"} Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.570085 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-fdc69b5cc-jz28l"] Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.582481 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.588040 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-fdc69b5cc-jz28l"] Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.591740 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.592266 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.660991 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e23cf6de-4d7f-40f1-aac9-a397b1c8bb36-public-tls-certs\") pod \"neutron-fdc69b5cc-jz28l\" (UID: \"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36\") " pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.661073 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e23cf6de-4d7f-40f1-aac9-a397b1c8bb36-combined-ca-bundle\") pod \"neutron-fdc69b5cc-jz28l\" (UID: \"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36\") " pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.661131 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e23cf6de-4d7f-40f1-aac9-a397b1c8bb36-httpd-config\") pod \"neutron-fdc69b5cc-jz28l\" (UID: \"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36\") " pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.661168 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nhcm\" (UniqueName: \"kubernetes.io/projected/e23cf6de-4d7f-40f1-aac9-a397b1c8bb36-kube-api-access-7nhcm\") pod \"neutron-fdc69b5cc-jz28l\" (UID: \"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36\") " pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.661207 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e23cf6de-4d7f-40f1-aac9-a397b1c8bb36-config\") pod \"neutron-fdc69b5cc-jz28l\" (UID: \"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36\") " pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.661285 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/e23cf6de-4d7f-40f1-aac9-a397b1c8bb36-internal-tls-certs\") pod \"neutron-fdc69b5cc-jz28l\" (UID: \"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36\") " pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.661337 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e23cf6de-4d7f-40f1-aac9-a397b1c8bb36-ovndb-tls-certs\") pod \"neutron-fdc69b5cc-jz28l\" (UID: \"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36\") " pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.756363 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.756409 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" event={"ID":"b62e6ed0-d60b-46ea-b90e-a299f8b980cc","Type":"ContainerStarted","Data":"63afb34fc959015c64d47b479eee835214c16ad75d3007efb2b744e6936d461d"} Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.756431 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-79bbc45dbd-qv76b" event={"ID":"b919f6e7-2668-404e-b839-5173deb3824d","Type":"ContainerStarted","Data":"a5e083bae82f5749220d50032def5547ed68d118cb6e6bdeb82e50de4f9e0de2"} Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.756444 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-79bbc45dbd-qv76b" event={"ID":"b919f6e7-2668-404e-b839-5173deb3824d","Type":"ContainerStarted","Data":"e148295b4a89a444c8c3b5e19c84e8e0a5d50773e6a73501e15b66fa19075ebe"} Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.756456 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-79bbc45dbd-qv76b" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.763322 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e23cf6de-4d7f-40f1-aac9-a397b1c8bb36-public-tls-certs\") pod \"neutron-fdc69b5cc-jz28l\" (UID: \"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36\") " pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.763405 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e23cf6de-4d7f-40f1-aac9-a397b1c8bb36-combined-ca-bundle\") pod \"neutron-fdc69b5cc-jz28l\" (UID: \"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36\") " pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.763455 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e23cf6de-4d7f-40f1-aac9-a397b1c8bb36-httpd-config\") pod \"neutron-fdc69b5cc-jz28l\" (UID: \"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36\") " pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.763484 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nhcm\" (UniqueName: \"kubernetes.io/projected/e23cf6de-4d7f-40f1-aac9-a397b1c8bb36-kube-api-access-7nhcm\") pod \"neutron-fdc69b5cc-jz28l\" (UID: \"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36\") " pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.763535 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"config\" (UniqueName: \"kubernetes.io/secret/e23cf6de-4d7f-40f1-aac9-a397b1c8bb36-config\") pod \"neutron-fdc69b5cc-jz28l\" (UID: \"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36\") " pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.763601 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e23cf6de-4d7f-40f1-aac9-a397b1c8bb36-internal-tls-certs\") pod \"neutron-fdc69b5cc-jz28l\" (UID: \"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36\") " pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.763646 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e23cf6de-4d7f-40f1-aac9-a397b1c8bb36-ovndb-tls-certs\") pod \"neutron-fdc69b5cc-jz28l\" (UID: \"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36\") " pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.771323 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5bf794b984-bbcp5" event={"ID":"ca801008-2024-4b8d-a69b-2f468a78f1a1","Type":"ContainerStarted","Data":"b19ded3296476d231c9a2b0a9440bcffe9da90e45ad33d5e0fbdf6fa4355acce"} Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.771369 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5bf794b984-bbcp5" event={"ID":"ca801008-2024-4b8d-a69b-2f468a78f1a1","Type":"ContainerStarted","Data":"df0a48feb85794e6609b6ed4e8ad0ecde3103dd9f860baec2a748d33059fd295"} Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.772166 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.772194 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5bf794b984-bbcp5" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.779134 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e23cf6de-4d7f-40f1-aac9-a397b1c8bb36-combined-ca-bundle\") pod \"neutron-fdc69b5cc-jz28l\" (UID: \"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36\") " pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.779635 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e23cf6de-4d7f-40f1-aac9-a397b1c8bb36-public-tls-certs\") pod \"neutron-fdc69b5cc-jz28l\" (UID: \"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36\") " pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.780217 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e23cf6de-4d7f-40f1-aac9-a397b1c8bb36-internal-tls-certs\") pod \"neutron-fdc69b5cc-jz28l\" (UID: \"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36\") " pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.781546 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6cd548ffc8-p78fk" event={"ID":"f8e2361f-7cd6-4055-8e0d-a53eda846c23","Type":"ContainerStarted","Data":"7877e8eb69b820865b0e03655f4699307fa3a61b4af253f3f4938676ccca3db5"} Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.782723 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/keystone-6cd548ffc8-p78fk" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.784666 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e23cf6de-4d7f-40f1-aac9-a397b1c8bb36-ovndb-tls-certs\") pod \"neutron-fdc69b5cc-jz28l\" (UID: \"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36\") " pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.785445 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/e23cf6de-4d7f-40f1-aac9-a397b1c8bb36-config\") pod \"neutron-fdc69b5cc-jz28l\" (UID: \"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36\") " pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.792121 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" podStartSLOduration=3.792097867 podStartE2EDuration="3.792097867s" podCreationTimestamp="2025-11-25 09:23:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:23:29.768122152 +0000 UTC m=+1204.821761880" watchObservedRunningTime="2025-11-25 09:23:29.792097867 +0000 UTC m=+1204.845737585" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.806697 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e23cf6de-4d7f-40f1-aac9-a397b1c8bb36-httpd-config\") pod \"neutron-fdc69b5cc-jz28l\" (UID: \"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36\") " pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.814698 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7nhcm\" (UniqueName: \"kubernetes.io/projected/e23cf6de-4d7f-40f1-aac9-a397b1c8bb36-kube-api-access-7nhcm\") pod \"neutron-fdc69b5cc-jz28l\" (UID: \"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36\") " pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.865620 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-79bbc45dbd-qv76b" podStartSLOduration=3.865599054 podStartE2EDuration="3.865599054s" podCreationTimestamp="2025-11-25 09:23:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:23:29.806281234 +0000 UTC m=+1204.859920972" watchObservedRunningTime="2025-11-25 09:23:29.865599054 +0000 UTC m=+1204.919238772" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.872700 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-5bf794b984-bbcp5" podStartSLOduration=2.8726641280000003 podStartE2EDuration="2.872664128s" podCreationTimestamp="2025-11-25 09:23:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:23:29.829229701 +0000 UTC m=+1204.882869429" watchObservedRunningTime="2025-11-25 09:23:29.872664128 +0000 UTC m=+1204.926303846" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.902439 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-6cd548ffc8-p78fk" podStartSLOduration=2.90240964 podStartE2EDuration="2.90240964s" podCreationTimestamp="2025-11-25 09:23:27 +0000 UTC" firstStartedPulling="0001-01-01 
00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:23:29.843691016 +0000 UTC m=+1204.897330734" watchObservedRunningTime="2025-11-25 09:23:29.90240964 +0000 UTC m=+1204.956049358" Nov 25 09:23:29 crc kubenswrapper[4687]: I1125 09:23:29.914607 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:30 crc kubenswrapper[4687]: I1125 09:23:30.484480 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-fdc69b5cc-jz28l"] Nov 25 09:23:30 crc kubenswrapper[4687]: I1125 09:23:30.790581 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-fdc69b5cc-jz28l" event={"ID":"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36","Type":"ContainerStarted","Data":"714575c7af5e80608a2d363f5cc4abb18454663e89aba3be154d09f1e6a24e4d"} Nov 25 09:23:31 crc kubenswrapper[4687]: I1125 09:23:31.807575 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-fdc69b5cc-jz28l" event={"ID":"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36","Type":"ContainerStarted","Data":"80884a4a01d0931216b8aa814ee1081d72fdac8d75bdfd43898601e2569f1e28"} Nov 25 09:23:31 crc kubenswrapper[4687]: I1125 09:23:31.807957 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-fdc69b5cc-jz28l" event={"ID":"e23cf6de-4d7f-40f1-aac9-a397b1c8bb36","Type":"ContainerStarted","Data":"b9b5b49138e992c5eba56239c66f4e3fff72df675f44ee5a862b20c59433a558"} Nov 25 09:23:31 crc kubenswrapper[4687]: I1125 09:23:31.837237 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-fdc69b5cc-jz28l" podStartSLOduration=2.837217804 podStartE2EDuration="2.837217804s" podCreationTimestamp="2025-11-25 09:23:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:23:31.827693084 +0000 UTC m=+1206.881332802" watchObservedRunningTime="2025-11-25 09:23:31.837217804 +0000 UTC m=+1206.890857522" Nov 25 09:23:32 crc kubenswrapper[4687]: I1125 09:23:32.816699 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-fdc69b5cc-jz28l" Nov 25 09:23:35 crc kubenswrapper[4687]: I1125 09:23:35.855869 4687 generic.go:334] "Generic (PLEG): container finished" podID="5d974fcb-fbad-4f24-9857-a791205029a0" containerID="120c8e9dd480db83c22337cce02f25f60cda56a443c5e55303820d8ae6e84ea3" exitCode=0 Nov 25 09:23:35 crc kubenswrapper[4687]: I1125 09:23:35.855946 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-8b7k2" event={"ID":"5d974fcb-fbad-4f24-9857-a791205029a0","Type":"ContainerDied","Data":"120c8e9dd480db83c22337cce02f25f60cda56a443c5e55303820d8ae6e84ea3"} Nov 25 09:23:37 crc kubenswrapper[4687]: I1125 09:23:37.125383 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" Nov 25 09:23:37 crc kubenswrapper[4687]: I1125 09:23:37.224387 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-v7qrn"] Nov 25 09:23:37 crc kubenswrapper[4687]: I1125 09:23:37.225049 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" podUID="080ab56b-8da0-4b11-9595-6766031cfb41" containerName="dnsmasq-dns" containerID="cri-o://27d92533e56caa77f6e825bfea1df82e54c6c479b32b78267cddb6b34a17e48c" gracePeriod=10 Nov 25 09:23:37 crc kubenswrapper[4687]: I1125 
09:23:37.382576 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7dc8446cb-d6wz7" podUID="ea89f490-5a54-46eb-9b1c-1eb96dd181da" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.146:8443: connect: connection refused" Nov 25 09:23:37 crc kubenswrapper[4687]: I1125 09:23:37.388049 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-69b7bcc78d-r6t7q" podUID="e4c7abdb-6d41-42c3-a228-27ebd825e7b5" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.147:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.147:8443: connect: connection refused" Nov 25 09:23:37 crc kubenswrapper[4687]: I1125 09:23:37.876197 4687 generic.go:334] "Generic (PLEG): container finished" podID="080ab56b-8da0-4b11-9595-6766031cfb41" containerID="27d92533e56caa77f6e825bfea1df82e54c6c479b32b78267cddb6b34a17e48c" exitCode=0 Nov 25 09:23:37 crc kubenswrapper[4687]: I1125 09:23:37.876248 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" event={"ID":"080ab56b-8da0-4b11-9595-6766031cfb41","Type":"ContainerDied","Data":"27d92533e56caa77f6e825bfea1df82e54c6c479b32b78267cddb6b34a17e48c"} Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.373872 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-8b7k2" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.530117 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gxt6v\" (UniqueName: \"kubernetes.io/projected/5d974fcb-fbad-4f24-9857-a791205029a0-kube-api-access-gxt6v\") pod \"5d974fcb-fbad-4f24-9857-a791205029a0\" (UID: \"5d974fcb-fbad-4f24-9857-a791205029a0\") " Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.530214 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d974fcb-fbad-4f24-9857-a791205029a0-combined-ca-bundle\") pod \"5d974fcb-fbad-4f24-9857-a791205029a0\" (UID: \"5d974fcb-fbad-4f24-9857-a791205029a0\") " Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.530299 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5d974fcb-fbad-4f24-9857-a791205029a0-db-sync-config-data\") pod \"5d974fcb-fbad-4f24-9857-a791205029a0\" (UID: \"5d974fcb-fbad-4f24-9857-a791205029a0\") " Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.538762 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d974fcb-fbad-4f24-9857-a791205029a0-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "5d974fcb-fbad-4f24-9857-a791205029a0" (UID: "5d974fcb-fbad-4f24-9857-a791205029a0"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.561731 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d974fcb-fbad-4f24-9857-a791205029a0-kube-api-access-gxt6v" (OuterVolumeSpecName: "kube-api-access-gxt6v") pod "5d974fcb-fbad-4f24-9857-a791205029a0" (UID: "5d974fcb-fbad-4f24-9857-a791205029a0"). InnerVolumeSpecName "kube-api-access-gxt6v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.579838 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d974fcb-fbad-4f24-9857-a791205029a0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5d974fcb-fbad-4f24-9857-a791205029a0" (UID: "5d974fcb-fbad-4f24-9857-a791205029a0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.630756 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.644175 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gxt6v\" (UniqueName: \"kubernetes.io/projected/5d974fcb-fbad-4f24-9857-a791205029a0-kube-api-access-gxt6v\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.644235 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d974fcb-fbad-4f24-9857-a791205029a0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.644248 4687 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5d974fcb-fbad-4f24-9857-a791205029a0-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:38 crc kubenswrapper[4687]: E1125 09:23:38.744020 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="09662da0-b802-43c3-9c8e-4c9e951bdd7f" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.744892 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-dns-svc\") pod \"080ab56b-8da0-4b11-9595-6766031cfb41\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.745033 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-ovsdbserver-sb\") pod \"080ab56b-8da0-4b11-9595-6766031cfb41\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.745087 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-dns-swift-storage-0\") pod \"080ab56b-8da0-4b11-9595-6766031cfb41\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.745117 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-config\") pod \"080ab56b-8da0-4b11-9595-6766031cfb41\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.745218 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hqvzs\" (UniqueName: \"kubernetes.io/projected/080ab56b-8da0-4b11-9595-6766031cfb41-kube-api-access-hqvzs\") pod 
\"080ab56b-8da0-4b11-9595-6766031cfb41\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.745297 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-ovsdbserver-nb\") pod \"080ab56b-8da0-4b11-9595-6766031cfb41\" (UID: \"080ab56b-8da0-4b11-9595-6766031cfb41\") " Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.759813 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/080ab56b-8da0-4b11-9595-6766031cfb41-kube-api-access-hqvzs" (OuterVolumeSpecName: "kube-api-access-hqvzs") pod "080ab56b-8da0-4b11-9595-6766031cfb41" (UID: "080ab56b-8da0-4b11-9595-6766031cfb41"). InnerVolumeSpecName "kube-api-access-hqvzs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.809170 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-config" (OuterVolumeSpecName: "config") pod "080ab56b-8da0-4b11-9595-6766031cfb41" (UID: "080ab56b-8da0-4b11-9595-6766031cfb41"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.818963 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "080ab56b-8da0-4b11-9595-6766031cfb41" (UID: "080ab56b-8da0-4b11-9595-6766031cfb41"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.819263 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "080ab56b-8da0-4b11-9595-6766031cfb41" (UID: "080ab56b-8da0-4b11-9595-6766031cfb41"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.824161 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "080ab56b-8da0-4b11-9595-6766031cfb41" (UID: "080ab56b-8da0-4b11-9595-6766031cfb41"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.826078 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "080ab56b-8da0-4b11-9595-6766031cfb41" (UID: "080ab56b-8da0-4b11-9595-6766031cfb41"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.847811 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.847846 4687 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.847856 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.847864 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hqvzs\" (UniqueName: \"kubernetes.io/projected/080ab56b-8da0-4b11-9595-6766031cfb41-kube-api-access-hqvzs\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.847874 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.847882 4687 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/080ab56b-8da0-4b11-9595-6766031cfb41-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.885393 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-8b7k2" event={"ID":"5d974fcb-fbad-4f24-9857-a791205029a0","Type":"ContainerDied","Data":"92d0db8d8b072c3e4de51c5551db36e1144d3d4cb9f2282e4c669091d0ad501c"} Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.885431 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="92d0db8d8b072c3e4de51c5551db36e1144d3d4cb9f2282e4c669091d0ad501c" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.885429 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-8b7k2" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.887823 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09662da0-b802-43c3-9c8e-4c9e951bdd7f","Type":"ContainerStarted","Data":"3a81502e023fe181770985aa8eea339beaf86d4f47bfb8fbe3a59f5d7e83136d"} Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.887981 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="09662da0-b802-43c3-9c8e-4c9e951bdd7f" containerName="ceilometer-notification-agent" containerID="cri-o://4b85b4fc296a4ea78929c1a8a57a40cfa5ce2c00d86d703f758a339b8331c829" gracePeriod=30 Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.888207 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.888490 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="09662da0-b802-43c3-9c8e-4c9e951bdd7f" containerName="proxy-httpd" containerID="cri-o://3a81502e023fe181770985aa8eea339beaf86d4f47bfb8fbe3a59f5d7e83136d" gracePeriod=30 Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.888574 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="09662da0-b802-43c3-9c8e-4c9e951bdd7f" containerName="sg-core" containerID="cri-o://55641debda0cfb9cd4ff16ce9abff92b66ae9a3061601c32b13947c28897ed00" gracePeriod=30 Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.894437 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" event={"ID":"080ab56b-8da0-4b11-9595-6766031cfb41","Type":"ContainerDied","Data":"05eda86e5101745a78fcdad654dc6c8f82aae8aea4ce49fde19c46f81ea69425"} Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.894470 4687 scope.go:117] "RemoveContainer" containerID="27d92533e56caa77f6e825bfea1df82e54c6c479b32b78267cddb6b34a17e48c" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.894579 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-v7qrn" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.923186 4687 scope.go:117] "RemoveContainer" containerID="927912f2959fecd35bd5afc3939186980ec12c65afb664317d2b756c7ec69edc" Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.940017 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-v7qrn"] Nov 25 09:23:38 crc kubenswrapper[4687]: I1125 09:23:38.948045 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-v7qrn"] Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.683485 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-c8494b867-8fmmw"] Nov 25 09:23:39 crc kubenswrapper[4687]: E1125 09:23:39.683840 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="080ab56b-8da0-4b11-9595-6766031cfb41" containerName="dnsmasq-dns" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.683855 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="080ab56b-8da0-4b11-9595-6766031cfb41" containerName="dnsmasq-dns" Nov 25 09:23:39 crc kubenswrapper[4687]: E1125 09:23:39.683895 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d974fcb-fbad-4f24-9857-a791205029a0" containerName="barbican-db-sync" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.683904 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d974fcb-fbad-4f24-9857-a791205029a0" containerName="barbican-db-sync" Nov 25 09:23:39 crc kubenswrapper[4687]: E1125 09:23:39.683918 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="080ab56b-8da0-4b11-9595-6766031cfb41" containerName="init" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.683923 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="080ab56b-8da0-4b11-9595-6766031cfb41" containerName="init" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.684107 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d974fcb-fbad-4f24-9857-a791205029a0" containerName="barbican-db-sync" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.684129 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="080ab56b-8da0-4b11-9595-6766031cfb41" containerName="dnsmasq-dns" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.685114 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-c8494b867-8fmmw" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.698844 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.699051 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.706031 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-gw9r5" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.718068 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-674b9465d-cz7jd"] Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.719540 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-674b9465d-cz7jd" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.727373 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.728559 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-674b9465d-cz7jd"] Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.759057 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="080ab56b-8da0-4b11-9595-6766031cfb41" path="/var/lib/kubelet/pods/080ab56b-8da0-4b11-9595-6766031cfb41/volumes" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.759659 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-c8494b867-8fmmw"] Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.764403 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/90df3ed8-0bc0-4a26-940d-13dd51fd575a-logs\") pod \"barbican-worker-c8494b867-8fmmw\" (UID: \"90df3ed8-0bc0-4a26-940d-13dd51fd575a\") " pod="openstack/barbican-worker-c8494b867-8fmmw" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.764529 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/90df3ed8-0bc0-4a26-940d-13dd51fd575a-config-data-custom\") pod \"barbican-worker-c8494b867-8fmmw\" (UID: \"90df3ed8-0bc0-4a26-940d-13dd51fd575a\") " pod="openstack/barbican-worker-c8494b867-8fmmw" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.764558 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90df3ed8-0bc0-4a26-940d-13dd51fd575a-config-data\") pod \"barbican-worker-c8494b867-8fmmw\" (UID: \"90df3ed8-0bc0-4a26-940d-13dd51fd575a\") " pod="openstack/barbican-worker-c8494b867-8fmmw" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.764631 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90df3ed8-0bc0-4a26-940d-13dd51fd575a-combined-ca-bundle\") pod \"barbican-worker-c8494b867-8fmmw\" (UID: \"90df3ed8-0bc0-4a26-940d-13dd51fd575a\") " pod="openstack/barbican-worker-c8494b867-8fmmw" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.764681 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgnct\" (UniqueName: \"kubernetes.io/projected/90df3ed8-0bc0-4a26-940d-13dd51fd575a-kube-api-access-lgnct\") pod \"barbican-worker-c8494b867-8fmmw\" (UID: \"90df3ed8-0bc0-4a26-940d-13dd51fd575a\") " pod="openstack/barbican-worker-c8494b867-8fmmw" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.780562 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-tb2kv"] Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.786685 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.842629 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-tb2kv"] Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.866100 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90df3ed8-0bc0-4a26-940d-13dd51fd575a-config-data\") pod \"barbican-worker-c8494b867-8fmmw\" (UID: \"90df3ed8-0bc0-4a26-940d-13dd51fd575a\") " pod="openstack/barbican-worker-c8494b867-8fmmw" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.866159 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-tb2kv\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") " pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.866191 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-config\") pod \"dnsmasq-dns-848cf88cfc-tb2kv\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") " pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.866217 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90df3ed8-0bc0-4a26-940d-13dd51fd575a-combined-ca-bundle\") pod \"barbican-worker-c8494b867-8fmmw\" (UID: \"90df3ed8-0bc0-4a26-940d-13dd51fd575a\") " pod="openstack/barbican-worker-c8494b867-8fmmw" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.866257 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgnct\" (UniqueName: \"kubernetes.io/projected/90df3ed8-0bc0-4a26-940d-13dd51fd575a-kube-api-access-lgnct\") pod \"barbican-worker-c8494b867-8fmmw\" (UID: \"90df3ed8-0bc0-4a26-940d-13dd51fd575a\") " pod="openstack/barbican-worker-c8494b867-8fmmw" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.866278 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50819727-088c-4d7f-bff7-c95d3d2ece69-combined-ca-bundle\") pod \"barbican-keystone-listener-674b9465d-cz7jd\" (UID: \"50819727-088c-4d7f-bff7-c95d3d2ece69\") " pod="openstack/barbican-keystone-listener-674b9465d-cz7jd" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.866306 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtq9x\" (UniqueName: \"kubernetes.io/projected/50819727-088c-4d7f-bff7-c95d3d2ece69-kube-api-access-gtq9x\") pod \"barbican-keystone-listener-674b9465d-cz7jd\" (UID: \"50819727-088c-4d7f-bff7-c95d3d2ece69\") " pod="openstack/barbican-keystone-listener-674b9465d-cz7jd" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.866324 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4c9t\" (UniqueName: \"kubernetes.io/projected/ef9042c2-4451-4ab0-9197-6097ec01cd56-kube-api-access-n4c9t\") pod \"dnsmasq-dns-848cf88cfc-tb2kv\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") " pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 
09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.866362 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50819727-088c-4d7f-bff7-c95d3d2ece69-logs\") pod \"barbican-keystone-listener-674b9465d-cz7jd\" (UID: \"50819727-088c-4d7f-bff7-c95d3d2ece69\") " pod="openstack/barbican-keystone-listener-674b9465d-cz7jd" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.866379 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/50819727-088c-4d7f-bff7-c95d3d2ece69-config-data-custom\") pod \"barbican-keystone-listener-674b9465d-cz7jd\" (UID: \"50819727-088c-4d7f-bff7-c95d3d2ece69\") " pod="openstack/barbican-keystone-listener-674b9465d-cz7jd" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.866400 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/90df3ed8-0bc0-4a26-940d-13dd51fd575a-logs\") pod \"barbican-worker-c8494b867-8fmmw\" (UID: \"90df3ed8-0bc0-4a26-940d-13dd51fd575a\") " pod="openstack/barbican-worker-c8494b867-8fmmw" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.866443 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-tb2kv\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") " pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.866457 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-tb2kv\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") " pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.866477 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-tb2kv\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") " pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.866492 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50819727-088c-4d7f-bff7-c95d3d2ece69-config-data\") pod \"barbican-keystone-listener-674b9465d-cz7jd\" (UID: \"50819727-088c-4d7f-bff7-c95d3d2ece69\") " pod="openstack/barbican-keystone-listener-674b9465d-cz7jd" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.866529 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/90df3ed8-0bc0-4a26-940d-13dd51fd575a-config-data-custom\") pod \"barbican-worker-c8494b867-8fmmw\" (UID: \"90df3ed8-0bc0-4a26-940d-13dd51fd575a\") " pod="openstack/barbican-worker-c8494b867-8fmmw" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.869361 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/90df3ed8-0bc0-4a26-940d-13dd51fd575a-logs\") pod \"barbican-worker-c8494b867-8fmmw\" (UID: 
\"90df3ed8-0bc0-4a26-940d-13dd51fd575a\") " pod="openstack/barbican-worker-c8494b867-8fmmw" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.878693 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/90df3ed8-0bc0-4a26-940d-13dd51fd575a-config-data-custom\") pod \"barbican-worker-c8494b867-8fmmw\" (UID: \"90df3ed8-0bc0-4a26-940d-13dd51fd575a\") " pod="openstack/barbican-worker-c8494b867-8fmmw" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.880664 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90df3ed8-0bc0-4a26-940d-13dd51fd575a-config-data\") pod \"barbican-worker-c8494b867-8fmmw\" (UID: \"90df3ed8-0bc0-4a26-940d-13dd51fd575a\") " pod="openstack/barbican-worker-c8494b867-8fmmw" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.881097 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90df3ed8-0bc0-4a26-940d-13dd51fd575a-combined-ca-bundle\") pod \"barbican-worker-c8494b867-8fmmw\" (UID: \"90df3ed8-0bc0-4a26-940d-13dd51fd575a\") " pod="openstack/barbican-worker-c8494b867-8fmmw" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.883430 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-74498bff74-qgdp6"] Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.889058 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-74498bff74-qgdp6" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.893792 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.897726 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgnct\" (UniqueName: \"kubernetes.io/projected/90df3ed8-0bc0-4a26-940d-13dd51fd575a-kube-api-access-lgnct\") pod \"barbican-worker-c8494b867-8fmmw\" (UID: \"90df3ed8-0bc0-4a26-940d-13dd51fd575a\") " pod="openstack/barbican-worker-c8494b867-8fmmw" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.910825 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-74498bff74-qgdp6"] Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.917220 4687 generic.go:334] "Generic (PLEG): container finished" podID="09662da0-b802-43c3-9c8e-4c9e951bdd7f" containerID="55641debda0cfb9cd4ff16ce9abff92b66ae9a3061601c32b13947c28897ed00" exitCode=2 Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.917288 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09662da0-b802-43c3-9c8e-4c9e951bdd7f","Type":"ContainerDied","Data":"55641debda0cfb9cd4ff16ce9abff92b66ae9a3061601c32b13947c28897ed00"} Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.926773 4687 generic.go:334] "Generic (PLEG): container finished" podID="53681654-97b6-4586-ba53-8b6b018e04fa" containerID="24477c3d7537d4689d99b2da56ecda859836de0fecd9930e4fefb8d153aff3f5" exitCode=0 Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.926805 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-2h86j" event={"ID":"53681654-97b6-4586-ba53-8b6b018e04fa","Type":"ContainerDied","Data":"24477c3d7537d4689d99b2da56ecda859836de0fecd9930e4fefb8d153aff3f5"} Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.977968 4687 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/050fcce2-c1d7-49d2-873a-0e85d090174e-logs\") pod \"barbican-api-74498bff74-qgdp6\" (UID: \"050fcce2-c1d7-49d2-873a-0e85d090174e\") " pod="openstack/barbican-api-74498bff74-qgdp6" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.978006 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2ssp\" (UniqueName: \"kubernetes.io/projected/050fcce2-c1d7-49d2-873a-0e85d090174e-kube-api-access-p2ssp\") pod \"barbican-api-74498bff74-qgdp6\" (UID: \"050fcce2-c1d7-49d2-873a-0e85d090174e\") " pod="openstack/barbican-api-74498bff74-qgdp6" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.978031 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/50819727-088c-4d7f-bff7-c95d3d2ece69-config-data-custom\") pod \"barbican-keystone-listener-674b9465d-cz7jd\" (UID: \"50819727-088c-4d7f-bff7-c95d3d2ece69\") " pod="openstack/barbican-keystone-listener-674b9465d-cz7jd" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.978052 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/050fcce2-c1d7-49d2-873a-0e85d090174e-config-data\") pod \"barbican-api-74498bff74-qgdp6\" (UID: \"050fcce2-c1d7-49d2-873a-0e85d090174e\") " pod="openstack/barbican-api-74498bff74-qgdp6" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.978075 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/050fcce2-c1d7-49d2-873a-0e85d090174e-config-data-custom\") pod \"barbican-api-74498bff74-qgdp6\" (UID: \"050fcce2-c1d7-49d2-873a-0e85d090174e\") " pod="openstack/barbican-api-74498bff74-qgdp6" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.978105 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-tb2kv\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") " pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.978121 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-tb2kv\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") " pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.978143 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-tb2kv\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") " pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.978157 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50819727-088c-4d7f-bff7-c95d3d2ece69-config-data\") pod \"barbican-keystone-listener-674b9465d-cz7jd\" (UID: \"50819727-088c-4d7f-bff7-c95d3d2ece69\") " pod="openstack/barbican-keystone-listener-674b9465d-cz7jd" Nov 25 09:23:39 crc 
kubenswrapper[4687]: I1125 09:23:39.978213 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-tb2kv\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") " pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.978248 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-config\") pod \"dnsmasq-dns-848cf88cfc-tb2kv\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") " pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.978312 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50819727-088c-4d7f-bff7-c95d3d2ece69-combined-ca-bundle\") pod \"barbican-keystone-listener-674b9465d-cz7jd\" (UID: \"50819727-088c-4d7f-bff7-c95d3d2ece69\") " pod="openstack/barbican-keystone-listener-674b9465d-cz7jd" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.978328 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/050fcce2-c1d7-49d2-873a-0e85d090174e-combined-ca-bundle\") pod \"barbican-api-74498bff74-qgdp6\" (UID: \"050fcce2-c1d7-49d2-873a-0e85d090174e\") " pod="openstack/barbican-api-74498bff74-qgdp6" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.978348 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtq9x\" (UniqueName: \"kubernetes.io/projected/50819727-088c-4d7f-bff7-c95d3d2ece69-kube-api-access-gtq9x\") pod \"barbican-keystone-listener-674b9465d-cz7jd\" (UID: \"50819727-088c-4d7f-bff7-c95d3d2ece69\") " pod="openstack/barbican-keystone-listener-674b9465d-cz7jd" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.978363 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4c9t\" (UniqueName: \"kubernetes.io/projected/ef9042c2-4451-4ab0-9197-6097ec01cd56-kube-api-access-n4c9t\") pod \"dnsmasq-dns-848cf88cfc-tb2kv\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") " pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.978400 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50819727-088c-4d7f-bff7-c95d3d2ece69-logs\") pod \"barbican-keystone-listener-674b9465d-cz7jd\" (UID: \"50819727-088c-4d7f-bff7-c95d3d2ece69\") " pod="openstack/barbican-keystone-listener-674b9465d-cz7jd" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.978762 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50819727-088c-4d7f-bff7-c95d3d2ece69-logs\") pod \"barbican-keystone-listener-674b9465d-cz7jd\" (UID: \"50819727-088c-4d7f-bff7-c95d3d2ece69\") " pod="openstack/barbican-keystone-listener-674b9465d-cz7jd" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.982058 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/50819727-088c-4d7f-bff7-c95d3d2ece69-config-data-custom\") pod \"barbican-keystone-listener-674b9465d-cz7jd\" (UID: \"50819727-088c-4d7f-bff7-c95d3d2ece69\") " 
pod="openstack/barbican-keystone-listener-674b9465d-cz7jd" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.982717 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-tb2kv\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") " pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.983211 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-config\") pod \"dnsmasq-dns-848cf88cfc-tb2kv\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") " pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.984137 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-tb2kv\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") " pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.984326 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-tb2kv\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") " pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.984750 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-tb2kv\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") " pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.988141 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50819727-088c-4d7f-bff7-c95d3d2ece69-combined-ca-bundle\") pod \"barbican-keystone-listener-674b9465d-cz7jd\" (UID: \"50819727-088c-4d7f-bff7-c95d3d2ece69\") " pod="openstack/barbican-keystone-listener-674b9465d-cz7jd" Nov 25 09:23:39 crc kubenswrapper[4687]: I1125 09:23:39.988866 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50819727-088c-4d7f-bff7-c95d3d2ece69-config-data\") pod \"barbican-keystone-listener-674b9465d-cz7jd\" (UID: \"50819727-088c-4d7f-bff7-c95d3d2ece69\") " pod="openstack/barbican-keystone-listener-674b9465d-cz7jd" Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.008029 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtq9x\" (UniqueName: \"kubernetes.io/projected/50819727-088c-4d7f-bff7-c95d3d2ece69-kube-api-access-gtq9x\") pod \"barbican-keystone-listener-674b9465d-cz7jd\" (UID: \"50819727-088c-4d7f-bff7-c95d3d2ece69\") " pod="openstack/barbican-keystone-listener-674b9465d-cz7jd" Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.011269 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4c9t\" (UniqueName: \"kubernetes.io/projected/ef9042c2-4451-4ab0-9197-6097ec01cd56-kube-api-access-n4c9t\") pod \"dnsmasq-dns-848cf88cfc-tb2kv\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") " 
pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.011682 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-c8494b867-8fmmw" Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.047742 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-674b9465d-cz7jd" Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.079797 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/050fcce2-c1d7-49d2-873a-0e85d090174e-combined-ca-bundle\") pod \"barbican-api-74498bff74-qgdp6\" (UID: \"050fcce2-c1d7-49d2-873a-0e85d090174e\") " pod="openstack/barbican-api-74498bff74-qgdp6" Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.079891 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/050fcce2-c1d7-49d2-873a-0e85d090174e-logs\") pod \"barbican-api-74498bff74-qgdp6\" (UID: \"050fcce2-c1d7-49d2-873a-0e85d090174e\") " pod="openstack/barbican-api-74498bff74-qgdp6" Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.079931 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2ssp\" (UniqueName: \"kubernetes.io/projected/050fcce2-c1d7-49d2-873a-0e85d090174e-kube-api-access-p2ssp\") pod \"barbican-api-74498bff74-qgdp6\" (UID: \"050fcce2-c1d7-49d2-873a-0e85d090174e\") " pod="openstack/barbican-api-74498bff74-qgdp6" Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.079954 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/050fcce2-c1d7-49d2-873a-0e85d090174e-config-data\") pod \"barbican-api-74498bff74-qgdp6\" (UID: \"050fcce2-c1d7-49d2-873a-0e85d090174e\") " pod="openstack/barbican-api-74498bff74-qgdp6" Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.080014 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/050fcce2-c1d7-49d2-873a-0e85d090174e-config-data-custom\") pod \"barbican-api-74498bff74-qgdp6\" (UID: \"050fcce2-c1d7-49d2-873a-0e85d090174e\") " pod="openstack/barbican-api-74498bff74-qgdp6" Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.080843 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/050fcce2-c1d7-49d2-873a-0e85d090174e-logs\") pod \"barbican-api-74498bff74-qgdp6\" (UID: \"050fcce2-c1d7-49d2-873a-0e85d090174e\") " pod="openstack/barbican-api-74498bff74-qgdp6" Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.084621 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/050fcce2-c1d7-49d2-873a-0e85d090174e-combined-ca-bundle\") pod \"barbican-api-74498bff74-qgdp6\" (UID: \"050fcce2-c1d7-49d2-873a-0e85d090174e\") " pod="openstack/barbican-api-74498bff74-qgdp6" Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.085882 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/050fcce2-c1d7-49d2-873a-0e85d090174e-config-data\") pod \"barbican-api-74498bff74-qgdp6\" (UID: \"050fcce2-c1d7-49d2-873a-0e85d090174e\") " pod="openstack/barbican-api-74498bff74-qgdp6" Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 
09:23:40.086392 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/050fcce2-c1d7-49d2-873a-0e85d090174e-config-data-custom\") pod \"barbican-api-74498bff74-qgdp6\" (UID: \"050fcce2-c1d7-49d2-873a-0e85d090174e\") " pod="openstack/barbican-api-74498bff74-qgdp6" Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.098632 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p2ssp\" (UniqueName: \"kubernetes.io/projected/050fcce2-c1d7-49d2-873a-0e85d090174e-kube-api-access-p2ssp\") pod \"barbican-api-74498bff74-qgdp6\" (UID: \"050fcce2-c1d7-49d2-873a-0e85d090174e\") " pod="openstack/barbican-api-74498bff74-qgdp6" Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.108445 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.368678 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-74498bff74-qgdp6" Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.482249 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-c8494b867-8fmmw"] Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.578398 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-674b9465d-cz7jd"] Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.678612 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-tb2kv"] Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.857782 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-74498bff74-qgdp6"] Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.937257 4687 generic.go:334] "Generic (PLEG): container finished" podID="ef9042c2-4451-4ab0-9197-6097ec01cd56" containerID="894f29de7940dae9dbe4de75aa95b96a7d537d4a229879350a11f8286e6aa83c" exitCode=0 Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.937299 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" event={"ID":"ef9042c2-4451-4ab0-9197-6097ec01cd56","Type":"ContainerDied","Data":"894f29de7940dae9dbe4de75aa95b96a7d537d4a229879350a11f8286e6aa83c"} Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.937341 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" event={"ID":"ef9042c2-4451-4ab0-9197-6097ec01cd56","Type":"ContainerStarted","Data":"3a71bd7ddbdd16b5d5ff77c5acea865fec57fb045e17ddaa4123392306ee111f"} Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.938562 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-674b9465d-cz7jd" event={"ID":"50819727-088c-4d7f-bff7-c95d3d2ece69","Type":"ContainerStarted","Data":"8671399a7ff2bdaebfd4d7ea128243a45afd31b2361017f80343baa3da542758"} Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.940897 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-c8494b867-8fmmw" event={"ID":"90df3ed8-0bc0-4a26-940d-13dd51fd575a","Type":"ContainerStarted","Data":"17ed48f94be8defbb59b4660d6dc13c7960e6eeecf8f743e82ed6c493eb64490"} Nov 25 09:23:40 crc kubenswrapper[4687]: I1125 09:23:40.943029 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-74498bff74-qgdp6" 
event={"ID":"050fcce2-c1d7-49d2-873a-0e85d090174e","Type":"ContainerStarted","Data":"d08b2b93a7a33cb12997aca78458a014be535d871d5d23464c45ba8ec63d5c56"} Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.244024 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-2h86j" Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.313207 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/53681654-97b6-4586-ba53-8b6b018e04fa-etc-machine-id\") pod \"53681654-97b6-4586-ba53-8b6b018e04fa\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.313244 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-combined-ca-bundle\") pod \"53681654-97b6-4586-ba53-8b6b018e04fa\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.313297 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pzvlh\" (UniqueName: \"kubernetes.io/projected/53681654-97b6-4586-ba53-8b6b018e04fa-kube-api-access-pzvlh\") pod \"53681654-97b6-4586-ba53-8b6b018e04fa\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.313329 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-config-data\") pod \"53681654-97b6-4586-ba53-8b6b018e04fa\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.313339 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/53681654-97b6-4586-ba53-8b6b018e04fa-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "53681654-97b6-4586-ba53-8b6b018e04fa" (UID: "53681654-97b6-4586-ba53-8b6b018e04fa"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.313396 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-scripts\") pod \"53681654-97b6-4586-ba53-8b6b018e04fa\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.313521 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-db-sync-config-data\") pod \"53681654-97b6-4586-ba53-8b6b018e04fa\" (UID: \"53681654-97b6-4586-ba53-8b6b018e04fa\") " Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.313914 4687 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/53681654-97b6-4586-ba53-8b6b018e04fa-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.319243 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "53681654-97b6-4586-ba53-8b6b018e04fa" (UID: "53681654-97b6-4586-ba53-8b6b018e04fa"). 
InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.320222 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-scripts" (OuterVolumeSpecName: "scripts") pod "53681654-97b6-4586-ba53-8b6b018e04fa" (UID: "53681654-97b6-4586-ba53-8b6b018e04fa"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.321340 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53681654-97b6-4586-ba53-8b6b018e04fa-kube-api-access-pzvlh" (OuterVolumeSpecName: "kube-api-access-pzvlh") pod "53681654-97b6-4586-ba53-8b6b018e04fa" (UID: "53681654-97b6-4586-ba53-8b6b018e04fa"). InnerVolumeSpecName "kube-api-access-pzvlh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.356282 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "53681654-97b6-4586-ba53-8b6b018e04fa" (UID: "53681654-97b6-4586-ba53-8b6b018e04fa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.395726 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-config-data" (OuterVolumeSpecName: "config-data") pod "53681654-97b6-4586-ba53-8b6b018e04fa" (UID: "53681654-97b6-4586-ba53-8b6b018e04fa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.415135 4687 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.415173 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.415185 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pzvlh\" (UniqueName: \"kubernetes.io/projected/53681654-97b6-4586-ba53-8b6b018e04fa-kube-api-access-pzvlh\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.415196 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.415206 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/53681654-97b6-4586-ba53-8b6b018e04fa-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.963766 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-2h86j" Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.964255 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-2h86j" event={"ID":"53681654-97b6-4586-ba53-8b6b018e04fa","Type":"ContainerDied","Data":"00cfb1c8362ee83370ad40a2f2ce4e06c53c63fc208c869c3150dd469eeec6c5"} Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.964293 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="00cfb1c8362ee83370ad40a2f2ce4e06c53c63fc208c869c3150dd469eeec6c5" Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.970675 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-74498bff74-qgdp6" event={"ID":"050fcce2-c1d7-49d2-873a-0e85d090174e","Type":"ContainerStarted","Data":"ded9129e832a16d66d96458bfc1db8f7a6c1656bb9afa55b5568aa70b752fd1f"} Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.970716 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-74498bff74-qgdp6" event={"ID":"050fcce2-c1d7-49d2-873a-0e85d090174e","Type":"ContainerStarted","Data":"7af453fd2c6bb77c76abb18d66e2f3153c14f62ba832ed5d5b861623552843a0"} Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.970860 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-74498bff74-qgdp6" Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.970892 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-74498bff74-qgdp6" Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.972912 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" event={"ID":"ef9042c2-4451-4ab0-9197-6097ec01cd56","Type":"ContainerStarted","Data":"c46c7d7b59a691cc63b38ae2ba1cec1997dea63c9d026c9eb056accadc70404e"} Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.973373 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 09:23:41 crc kubenswrapper[4687]: I1125 09:23:41.988110 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-74498bff74-qgdp6" podStartSLOduration=2.988092519 podStartE2EDuration="2.988092519s" podCreationTimestamp="2025-11-25 09:23:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:23:41.984901591 +0000 UTC m=+1217.038541329" watchObservedRunningTime="2025-11-25 09:23:41.988092519 +0000 UTC m=+1217.041732237" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.031003 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" podStartSLOduration=3.03098273 podStartE2EDuration="3.03098273s" podCreationTimestamp="2025-11-25 09:23:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:23:42.014079548 +0000 UTC m=+1217.067719286" watchObservedRunningTime="2025-11-25 09:23:42.03098273 +0000 UTC m=+1217.084622438" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.271521 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:23:42 crc kubenswrapper[4687]: E1125 09:23:42.272204 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53681654-97b6-4586-ba53-8b6b018e04fa" 
containerName="cinder-db-sync" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.272220 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="53681654-97b6-4586-ba53-8b6b018e04fa" containerName="cinder-db-sync" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.272382 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="53681654-97b6-4586-ba53-8b6b018e04fa" containerName="cinder-db-sync" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.273300 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.280598 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.281519 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.281797 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.282190 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-nwxld" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.310660 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.343954 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-tb2kv"] Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.389572 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-xg9r6"] Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.391207 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.399947 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-xg9r6"] Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.431388 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.432918 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.442979 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.444283 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.444328 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-config-data\") pod \"cinder-scheduler-0\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.444355 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.444369 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9flbv\" (UniqueName: \"kubernetes.io/projected/2b8071e8-54ac-4251-9604-d0dd6da182e2-kube-api-access-9flbv\") pod \"cinder-scheduler-0\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.444428 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2b8071e8-54ac-4251-9604-d0dd6da182e2-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.444467 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-scripts\") pod \"cinder-scheduler-0\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.490937 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.545641 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2b8071e8-54ac-4251-9604-d0dd6da182e2-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.545795 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2b8071e8-54ac-4251-9604-d0dd6da182e2-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.545867 4687 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-etc-machine-id\") pod \"cinder-api-0\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") " pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.545888 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwzvt\" (UniqueName: \"kubernetes.io/projected/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-kube-api-access-fwzvt\") pod \"cinder-api-0\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") " pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.545911 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-xg9r6\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.545934 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") " pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.546013 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-scripts\") pod \"cinder-api-0\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") " pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.546105 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-scripts\") pod \"cinder-scheduler-0\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.546153 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-xg9r6\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.546209 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-config-data\") pod \"cinder-api-0\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") " pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.546263 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-config-data-custom\") pod \"cinder-api-0\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") " pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.546323 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.546385 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-config-data\") pod \"cinder-scheduler-0\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.546443 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.546462 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9flbv\" (UniqueName: \"kubernetes.io/projected/2b8071e8-54ac-4251-9604-d0dd6da182e2-kube-api-access-9flbv\") pod \"cinder-scheduler-0\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.546520 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-dns-svc\") pod \"dnsmasq-dns-6578955fd5-xg9r6\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.546544 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-logs\") pod \"cinder-api-0\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") " pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.546573 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-xg9r6\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.546690 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-config\") pod \"dnsmasq-dns-6578955fd5-xg9r6\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.546845 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nb4rn\" (UniqueName: \"kubernetes.io/projected/a6ddcca5-c362-40ff-94ab-feb6330cc792-kube-api-access-nb4rn\") pod \"dnsmasq-dns-6578955fd5-xg9r6\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.551351 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-combined-ca-bundle\") pod 
\"cinder-scheduler-0\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.551709 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-scripts\") pod \"cinder-scheduler-0\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.553135 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-config-data\") pod \"cinder-scheduler-0\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.554425 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.563996 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9flbv\" (UniqueName: \"kubernetes.io/projected/2b8071e8-54ac-4251-9604-d0dd6da182e2-kube-api-access-9flbv\") pod \"cinder-scheduler-0\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.612130 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.650479 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-dns-svc\") pod \"dnsmasq-dns-6578955fd5-xg9r6\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.650572 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-logs\") pod \"cinder-api-0\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") " pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.650609 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-xg9r6\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.650678 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-config\") pod \"dnsmasq-dns-6578955fd5-xg9r6\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.650709 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nb4rn\" (UniqueName: \"kubernetes.io/projected/a6ddcca5-c362-40ff-94ab-feb6330cc792-kube-api-access-nb4rn\") pod \"dnsmasq-dns-6578955fd5-xg9r6\" (UID: 
\"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.650737 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-etc-machine-id\") pod \"cinder-api-0\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") " pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.650756 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwzvt\" (UniqueName: \"kubernetes.io/projected/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-kube-api-access-fwzvt\") pod \"cinder-api-0\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") " pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.650779 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-xg9r6\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.650802 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") " pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.650837 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-scripts\") pod \"cinder-api-0\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") " pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.650901 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-xg9r6\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.650939 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-config-data\") pod \"cinder-api-0\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") " pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.650975 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-config-data-custom\") pod \"cinder-api-0\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") " pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.651915 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-etc-machine-id\") pod \"cinder-api-0\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") " pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.652482 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-logs\") pod \"cinder-api-0\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") " pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.653088 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-config\") pod \"dnsmasq-dns-6578955fd5-xg9r6\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.653961 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-xg9r6\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.654094 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-xg9r6\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.654816 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-dns-svc\") pod \"dnsmasq-dns-6578955fd5-xg9r6\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.657066 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-xg9r6\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.657915 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-scripts\") pod \"cinder-api-0\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") " pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.659659 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-config-data\") pod \"cinder-api-0\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") " pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.662044 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-config-data-custom\") pod \"cinder-api-0\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") " pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.666343 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") " pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.670481 4687 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-fwzvt\" (UniqueName: \"kubernetes.io/projected/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-kube-api-access-fwzvt\") pod \"cinder-api-0\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") " pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.673684 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nb4rn\" (UniqueName: \"kubernetes.io/projected/a6ddcca5-c362-40ff-94ab-feb6330cc792-kube-api-access-nb4rn\") pod \"dnsmasq-dns-6578955fd5-xg9r6\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.751532 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.802778 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.957024 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6fd7f79f6b-n9xqk"] Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.961638 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.969156 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 25 09:23:42 crc kubenswrapper[4687]: I1125 09:23:42.970254 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.012138 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-c8494b867-8fmmw" event={"ID":"90df3ed8-0bc0-4a26-940d-13dd51fd575a","Type":"ContainerStarted","Data":"1353f22939315032267338be37a649beaffb77dee34122f45ca406bc5b3823a3"} Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.016570 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.045853 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6fd7f79f6b-n9xqk"] Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.068603 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8l4mm\" (UniqueName: \"kubernetes.io/projected/972adc4c-cd8b-4ead-a7da-1f21cf692157-kube-api-access-8l4mm\") pod \"barbican-api-6fd7f79f6b-n9xqk\" (UID: \"972adc4c-cd8b-4ead-a7da-1f21cf692157\") " pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.068653 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/972adc4c-cd8b-4ead-a7da-1f21cf692157-combined-ca-bundle\") pod \"barbican-api-6fd7f79f6b-n9xqk\" (UID: \"972adc4c-cd8b-4ead-a7da-1f21cf692157\") " pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.068679 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/972adc4c-cd8b-4ead-a7da-1f21cf692157-logs\") pod \"barbican-api-6fd7f79f6b-n9xqk\" (UID: \"972adc4c-cd8b-4ead-a7da-1f21cf692157\") " pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 
09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.068717 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/972adc4c-cd8b-4ead-a7da-1f21cf692157-public-tls-certs\") pod \"barbican-api-6fd7f79f6b-n9xqk\" (UID: \"972adc4c-cd8b-4ead-a7da-1f21cf692157\") " pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.068749 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/972adc4c-cd8b-4ead-a7da-1f21cf692157-internal-tls-certs\") pod \"barbican-api-6fd7f79f6b-n9xqk\" (UID: \"972adc4c-cd8b-4ead-a7da-1f21cf692157\") " pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.068774 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/972adc4c-cd8b-4ead-a7da-1f21cf692157-config-data-custom\") pod \"barbican-api-6fd7f79f6b-n9xqk\" (UID: \"972adc4c-cd8b-4ead-a7da-1f21cf692157\") " pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.068835 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/972adc4c-cd8b-4ead-a7da-1f21cf692157-config-data\") pod \"barbican-api-6fd7f79f6b-n9xqk\" (UID: \"972adc4c-cd8b-4ead-a7da-1f21cf692157\") " pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.170673 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/972adc4c-cd8b-4ead-a7da-1f21cf692157-config-data\") pod \"barbican-api-6fd7f79f6b-n9xqk\" (UID: \"972adc4c-cd8b-4ead-a7da-1f21cf692157\") " pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.171035 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8l4mm\" (UniqueName: \"kubernetes.io/projected/972adc4c-cd8b-4ead-a7da-1f21cf692157-kube-api-access-8l4mm\") pod \"barbican-api-6fd7f79f6b-n9xqk\" (UID: \"972adc4c-cd8b-4ead-a7da-1f21cf692157\") " pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.171065 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/972adc4c-cd8b-4ead-a7da-1f21cf692157-combined-ca-bundle\") pod \"barbican-api-6fd7f79f6b-n9xqk\" (UID: \"972adc4c-cd8b-4ead-a7da-1f21cf692157\") " pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.171086 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/972adc4c-cd8b-4ead-a7da-1f21cf692157-logs\") pod \"barbican-api-6fd7f79f6b-n9xqk\" (UID: \"972adc4c-cd8b-4ead-a7da-1f21cf692157\") " pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.171133 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/972adc4c-cd8b-4ead-a7da-1f21cf692157-public-tls-certs\") pod \"barbican-api-6fd7f79f6b-n9xqk\" (UID: \"972adc4c-cd8b-4ead-a7da-1f21cf692157\") " 
pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.171170 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/972adc4c-cd8b-4ead-a7da-1f21cf692157-internal-tls-certs\") pod \"barbican-api-6fd7f79f6b-n9xqk\" (UID: \"972adc4c-cd8b-4ead-a7da-1f21cf692157\") " pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.171198 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/972adc4c-cd8b-4ead-a7da-1f21cf692157-config-data-custom\") pod \"barbican-api-6fd7f79f6b-n9xqk\" (UID: \"972adc4c-cd8b-4ead-a7da-1f21cf692157\") " pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.174590 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/972adc4c-cd8b-4ead-a7da-1f21cf692157-logs\") pod \"barbican-api-6fd7f79f6b-n9xqk\" (UID: \"972adc4c-cd8b-4ead-a7da-1f21cf692157\") " pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.176532 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/972adc4c-cd8b-4ead-a7da-1f21cf692157-config-data-custom\") pod \"barbican-api-6fd7f79f6b-n9xqk\" (UID: \"972adc4c-cd8b-4ead-a7da-1f21cf692157\") " pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.177221 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/972adc4c-cd8b-4ead-a7da-1f21cf692157-combined-ca-bundle\") pod \"barbican-api-6fd7f79f6b-n9xqk\" (UID: \"972adc4c-cd8b-4ead-a7da-1f21cf692157\") " pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.177414 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/972adc4c-cd8b-4ead-a7da-1f21cf692157-public-tls-certs\") pod \"barbican-api-6fd7f79f6b-n9xqk\" (UID: \"972adc4c-cd8b-4ead-a7da-1f21cf692157\") " pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.182165 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/972adc4c-cd8b-4ead-a7da-1f21cf692157-config-data\") pod \"barbican-api-6fd7f79f6b-n9xqk\" (UID: \"972adc4c-cd8b-4ead-a7da-1f21cf692157\") " pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.185945 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/972adc4c-cd8b-4ead-a7da-1f21cf692157-internal-tls-certs\") pod \"barbican-api-6fd7f79f6b-n9xqk\" (UID: \"972adc4c-cd8b-4ead-a7da-1f21cf692157\") " pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.201514 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8l4mm\" (UniqueName: \"kubernetes.io/projected/972adc4c-cd8b-4ead-a7da-1f21cf692157-kube-api-access-8l4mm\") pod \"barbican-api-6fd7f79f6b-n9xqk\" (UID: \"972adc4c-cd8b-4ead-a7da-1f21cf692157\") " pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:43 crc 
kubenswrapper[4687]: I1125 09:23:43.304422 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.557618 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-xg9r6"] Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.637159 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:23:43 crc kubenswrapper[4687]: I1125 09:23:43.826325 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6fd7f79f6b-n9xqk"] Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.034455 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-c8494b867-8fmmw" event={"ID":"90df3ed8-0bc0-4a26-940d-13dd51fd575a","Type":"ContainerStarted","Data":"16732b26af26ae324f3e677ad9767e0355621d6df83698f10823a8bb7865f907"} Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.039641 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" event={"ID":"a6ddcca5-c362-40ff-94ab-feb6330cc792","Type":"ContainerStarted","Data":"8c238b9ee45a69696cfd408c888c342c1510cb6c0405d77e4584b05c49ca4691"} Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.039684 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" event={"ID":"a6ddcca5-c362-40ff-94ab-feb6330cc792","Type":"ContainerStarted","Data":"c002f1666eedb9ac434a0dcd7b17032ed304366c3b90d78d32973e34afd5f469"} Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.041000 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6fd7f79f6b-n9xqk" event={"ID":"972adc4c-cd8b-4ead-a7da-1f21cf692157","Type":"ContainerStarted","Data":"6d5e7ec3b8bee87901d6f2f0283952b8d6f8f2e76f081a4a32aec9c755c7e578"} Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.042538 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2b8071e8-54ac-4251-9604-d0dd6da182e2","Type":"ContainerStarted","Data":"cf834602fdba5ac2ae2ab02a9982e99656cbd428dd4d568e81ea79791f797ad7"} Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.044744 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-674b9465d-cz7jd" event={"ID":"50819727-088c-4d7f-bff7-c95d3d2ece69","Type":"ContainerStarted","Data":"a82879bec73d27d001607cce71c6ab9faf64be466e4bc55a2f35cc2ac06482da"} Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.044785 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-674b9465d-cz7jd" event={"ID":"50819727-088c-4d7f-bff7-c95d3d2ece69","Type":"ContainerStarted","Data":"0befbc957474d1e08b8c93fb9438f100e84015f93939771ecd29effcbaba5edd"} Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.048139 4687 generic.go:334] "Generic (PLEG): container finished" podID="09662da0-b802-43c3-9c8e-4c9e951bdd7f" containerID="4b85b4fc296a4ea78929c1a8a57a40cfa5ce2c00d86d703f758a339b8331c829" exitCode=0 Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.048199 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09662da0-b802-43c3-9c8e-4c9e951bdd7f","Type":"ContainerDied","Data":"4b85b4fc296a4ea78929c1a8a57a40cfa5ce2c00d86d703f758a339b8331c829"} Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.051749 4687 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" podUID="ef9042c2-4451-4ab0-9197-6097ec01cd56" containerName="dnsmasq-dns" containerID="cri-o://c46c7d7b59a691cc63b38ae2ba1cec1997dea63c9d026c9eb056accadc70404e" gracePeriod=10 Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.051990 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420","Type":"ContainerStarted","Data":"94baf5477978c8425698a6ed27bf3c87f81b44e5b8dab0fa4790a4036705c0e0"} Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.053842 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-c8494b867-8fmmw" podStartSLOduration=3.244223764 podStartE2EDuration="5.053829169s" podCreationTimestamp="2025-11-25 09:23:39 +0000 UTC" firstStartedPulling="2025-11-25 09:23:40.522743856 +0000 UTC m=+1215.576383574" lastFinishedPulling="2025-11-25 09:23:42.332349261 +0000 UTC m=+1217.385988979" observedRunningTime="2025-11-25 09:23:44.050397444 +0000 UTC m=+1219.104037162" watchObservedRunningTime="2025-11-25 09:23:44.053829169 +0000 UTC m=+1219.107468887" Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.136106 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-674b9465d-cz7jd" podStartSLOduration=3.388999438 podStartE2EDuration="5.136088616s" podCreationTimestamp="2025-11-25 09:23:39 +0000 UTC" firstStartedPulling="2025-11-25 09:23:40.588842501 +0000 UTC m=+1215.642482219" lastFinishedPulling="2025-11-25 09:23:42.335931679 +0000 UTC m=+1217.389571397" observedRunningTime="2025-11-25 09:23:44.076406895 +0000 UTC m=+1219.130046613" watchObservedRunningTime="2025-11-25 09:23:44.136088616 +0000 UTC m=+1219.189728334" Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.791561 4687 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.791561 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv"
Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.819266 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-ovsdbserver-nb\") pod \"ef9042c2-4451-4ab0-9197-6097ec01cd56\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") "
Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.819451 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-config\") pod \"ef9042c2-4451-4ab0-9197-6097ec01cd56\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") "
Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.819487 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-dns-svc\") pod \"ef9042c2-4451-4ab0-9197-6097ec01cd56\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") "
Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.819626 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-ovsdbserver-sb\") pod \"ef9042c2-4451-4ab0-9197-6097ec01cd56\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") "
Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.819681 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-dns-swift-storage-0\") pod \"ef9042c2-4451-4ab0-9197-6097ec01cd56\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") "
Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.819735 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n4c9t\" (UniqueName: \"kubernetes.io/projected/ef9042c2-4451-4ab0-9197-6097ec01cd56-kube-api-access-n4c9t\") pod \"ef9042c2-4451-4ab0-9197-6097ec01cd56\" (UID: \"ef9042c2-4451-4ab0-9197-6097ec01cd56\") "
Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.845866 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef9042c2-4451-4ab0-9197-6097ec01cd56-kube-api-access-n4c9t" (OuterVolumeSpecName: "kube-api-access-n4c9t") pod "ef9042c2-4451-4ab0-9197-6097ec01cd56" (UID: "ef9042c2-4451-4ab0-9197-6097ec01cd56"). InnerVolumeSpecName "kube-api-access-n4c9t". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.901899 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ef9042c2-4451-4ab0-9197-6097ec01cd56" (UID: "ef9042c2-4451-4ab0-9197-6097ec01cd56"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.904895 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ef9042c2-4451-4ab0-9197-6097ec01cd56" (UID: "ef9042c2-4451-4ab0-9197-6097ec01cd56"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.922455 4687 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.922493 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n4c9t\" (UniqueName: \"kubernetes.io/projected/ef9042c2-4451-4ab0-9197-6097ec01cd56-kube-api-access-n4c9t\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.922522 4687 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.933310 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ef9042c2-4451-4ab0-9197-6097ec01cd56" (UID: "ef9042c2-4451-4ab0-9197-6097ec01cd56"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.938493 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ef9042c2-4451-4ab0-9197-6097ec01cd56" (UID: "ef9042c2-4451-4ab0-9197-6097ec01cd56"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:23:44 crc kubenswrapper[4687]: I1125 09:23:44.965622 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-config" (OuterVolumeSpecName: "config") pod "ef9042c2-4451-4ab0-9197-6097ec01cd56" (UID: "ef9042c2-4451-4ab0-9197-6097ec01cd56"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.024008 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.024047 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.024060 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef9042c2-4451-4ab0-9197-6097ec01cd56-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.062712 4687 generic.go:334] "Generic (PLEG): container finished" podID="a6ddcca5-c362-40ff-94ab-feb6330cc792" containerID="8c238b9ee45a69696cfd408c888c342c1510cb6c0405d77e4584b05c49ca4691" exitCode=0 Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.062745 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" event={"ID":"a6ddcca5-c362-40ff-94ab-feb6330cc792","Type":"ContainerDied","Data":"8c238b9ee45a69696cfd408c888c342c1510cb6c0405d77e4584b05c49ca4691"} Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.064237 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6fd7f79f6b-n9xqk" event={"ID":"972adc4c-cd8b-4ead-a7da-1f21cf692157","Type":"ContainerStarted","Data":"ebf6c968edb3ee044ee9cc358f3a03c23114840e0bf6143757a825aa652693b0"} Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.064269 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6fd7f79f6b-n9xqk" event={"ID":"972adc4c-cd8b-4ead-a7da-1f21cf692157","Type":"ContainerStarted","Data":"f61b7b4e6f410259fa056d6ad1f76f67c1ab520c837961d9a95290b5028c50ee"} Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.064304 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.064375 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6fd7f79f6b-n9xqk" Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.070019 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2b8071e8-54ac-4251-9604-d0dd6da182e2","Type":"ContainerStarted","Data":"2b4765c45fc7aab56d824a7cba4a97df31de41b6c1ca1235ce2ad2d566fbdcc5"} Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.072885 4687 generic.go:334] "Generic (PLEG): container finished" podID="ef9042c2-4451-4ab0-9197-6097ec01cd56" containerID="c46c7d7b59a691cc63b38ae2ba1cec1997dea63c9d026c9eb056accadc70404e" exitCode=0 Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.072929 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" event={"ID":"ef9042c2-4451-4ab0-9197-6097ec01cd56","Type":"ContainerDied","Data":"c46c7d7b59a691cc63b38ae2ba1cec1997dea63c9d026c9eb056accadc70404e"} Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.072943 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.072955 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-tb2kv" event={"ID":"ef9042c2-4451-4ab0-9197-6097ec01cd56","Type":"ContainerDied","Data":"3a71bd7ddbdd16b5d5ff77c5acea865fec57fb045e17ddaa4123392306ee111f"} Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.072971 4687 scope.go:117] "RemoveContainer" containerID="c46c7d7b59a691cc63b38ae2ba1cec1997dea63c9d026c9eb056accadc70404e" Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.074771 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420","Type":"ContainerStarted","Data":"228c140127f40b55adc70328d5a7dea612a2bb1d580239a78306d359ef96a17c"} Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.119156 4687 scope.go:117] "RemoveContainer" containerID="894f29de7940dae9dbe4de75aa95b96a7d537d4a229879350a11f8286e6aa83c" Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.125598 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6fd7f79f6b-n9xqk" podStartSLOduration=3.12557953 podStartE2EDuration="3.12557953s" podCreationTimestamp="2025-11-25 09:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:23:45.117228132 +0000 UTC m=+1220.170867870" watchObservedRunningTime="2025-11-25 09:23:45.12557953 +0000 UTC m=+1220.179219268" Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.144869 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-tb2kv"] Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.153082 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-tb2kv"] Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.172203 4687 scope.go:117] "RemoveContainer" containerID="c46c7d7b59a691cc63b38ae2ba1cec1997dea63c9d026c9eb056accadc70404e" Nov 25 09:23:45 crc kubenswrapper[4687]: E1125 09:23:45.172748 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c46c7d7b59a691cc63b38ae2ba1cec1997dea63c9d026c9eb056accadc70404e\": container with ID starting with c46c7d7b59a691cc63b38ae2ba1cec1997dea63c9d026c9eb056accadc70404e not found: ID does not exist" containerID="c46c7d7b59a691cc63b38ae2ba1cec1997dea63c9d026c9eb056accadc70404e" Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.172783 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c46c7d7b59a691cc63b38ae2ba1cec1997dea63c9d026c9eb056accadc70404e"} err="failed to get container status \"c46c7d7b59a691cc63b38ae2ba1cec1997dea63c9d026c9eb056accadc70404e\": rpc error: code = NotFound desc = could not find container \"c46c7d7b59a691cc63b38ae2ba1cec1997dea63c9d026c9eb056accadc70404e\": container with ID starting with c46c7d7b59a691cc63b38ae2ba1cec1997dea63c9d026c9eb056accadc70404e not found: ID does not exist" Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.172808 4687 scope.go:117] "RemoveContainer" containerID="894f29de7940dae9dbe4de75aa95b96a7d537d4a229879350a11f8286e6aa83c" Nov 25 09:23:45 crc kubenswrapper[4687]: E1125 09:23:45.173055 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"894f29de7940dae9dbe4de75aa95b96a7d537d4a229879350a11f8286e6aa83c\": container with ID starting with 894f29de7940dae9dbe4de75aa95b96a7d537d4a229879350a11f8286e6aa83c not found: ID does not exist" containerID="894f29de7940dae9dbe4de75aa95b96a7d537d4a229879350a11f8286e6aa83c" Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.173084 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"894f29de7940dae9dbe4de75aa95b96a7d537d4a229879350a11f8286e6aa83c"} err="failed to get container status \"894f29de7940dae9dbe4de75aa95b96a7d537d4a229879350a11f8286e6aa83c\": rpc error: code = NotFound desc = could not find container \"894f29de7940dae9dbe4de75aa95b96a7d537d4a229879350a11f8286e6aa83c\": container with ID starting with 894f29de7940dae9dbe4de75aa95b96a7d537d4a229879350a11f8286e6aa83c not found: ID does not exist" Nov 25 09:23:45 crc kubenswrapper[4687]: W1125 09:23:45.401490 4687 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb62e6ed0_d60b_46ea_b90e_a299f8b980cc.slice/crio-9a8c18c38bea044113284adf7d183b3031ec16a48097337185a937b5f8d679a4.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb62e6ed0_d60b_46ea_b90e_a299f8b980cc.slice/crio-9a8c18c38bea044113284adf7d183b3031ec16a48097337185a937b5f8d679a4.scope: no such file or directory Nov 25 09:23:45 crc kubenswrapper[4687]: W1125 09:23:45.453447 4687 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podef9042c2_4451_4ab0_9197_6097ec01cd56.slice": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podef9042c2_4451_4ab0_9197_6097ec01cd56.slice: no such file or directory Nov 25 09:23:45 crc kubenswrapper[4687]: E1125 09:23:45.672574 4687 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod080ab56b_8da0_4b11_9595_6766031cfb41.slice/crio-27d92533e56caa77f6e825bfea1df82e54c6c479b32b78267cddb6b34a17e48c.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod080ab56b_8da0_4b11_9595_6766031cfb41.slice/crio-conmon-27d92533e56caa77f6e825bfea1df82e54c6c479b32b78267cddb6b34a17e48c.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09662da0_b802_43c3_9c8e_4c9e951bdd7f.slice/crio-conmon-55641debda0cfb9cd4ff16ce9abff92b66ae9a3061601c32b13947c28897ed00.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5d974fcb_fbad_4f24_9857_a791205029a0.slice/crio-conmon-120c8e9dd480db83c22337cce02f25f60cda56a443c5e55303820d8ae6e84ea3.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09662da0_b802_43c3_9c8e_4c9e951bdd7f.slice/crio-55641debda0cfb9cd4ff16ce9abff92b66ae9a3061601c32b13947c28897ed00.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod53681654_97b6_4586_ba53_8b6b018e04fa.slice/crio-24477c3d7537d4689d99b2da56ecda859836de0fecd9930e4fefb8d153aff3f5.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod080ab56b_8da0_4b11_9595_6766031cfb41.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5df3d575_c461_43d1_af22_d9f15aaf06b7.slice/crio-ea5c36b616c5e02c1869d3c22c1856893ebd5facf702a4e075dc9fcd64e5c0f7.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod53681654_97b6_4586_ba53_8b6b018e04fa.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod53681654_97b6_4586_ba53_8b6b018e04fa.slice/crio-conmon-24477c3d7537d4689d99b2da56ecda859836de0fecd9930e4fefb8d153aff3f5.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod41b80a26_d97f_4344_8489_cfa0dbbaf99f.slice/crio-conmon-488eb876ed48587f3b0483046e9f10f4d55e0a74e3a75c3081de304ea303c4ac.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod080ab56b_8da0_4b11_9595_6766031cfb41.slice/crio-05eda86e5101745a78fcdad654dc6c8f82aae8aea4ce49fde19c46f81ea69425\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5d974fcb_fbad_4f24_9857_a791205029a0.slice/crio-92d0db8d8b072c3e4de51c5551db36e1144d3d4cb9f2282e4c669091d0ad501c\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod41b80a26_d97f_4344_8489_cfa0dbbaf99f.slice/crio-efe8191e8265cb34dbd18530c62eb2172d1ee33cb120ba637460a782929b2bbc.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5df3d575_c461_43d1_af22_d9f15aaf06b7.slice/crio-17238923e26e3d2d96e4944b1bedd12f5c0810e0c9d223e23ed55444902e1ed6.scope\": RecentStats: unable to find data in memory cache]" Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.755807 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef9042c2-4451-4ab0-9197-6097ec01cd56" path="/var/lib/kubelet/pods/ef9042c2-4451-4ab0-9197-6097ec01cd56/volumes" Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.948967 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5788b95877-j7l5l" Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.986114 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wd4sq\" (UniqueName: \"kubernetes.io/projected/41b80a26-d97f-4344-8489-cfa0dbbaf99f-kube-api-access-wd4sq\") pod \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\" (UID: \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\") " Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.986943 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/41b80a26-d97f-4344-8489-cfa0dbbaf99f-horizon-secret-key\") pod \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\" (UID: \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\") " Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.987116 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/41b80a26-d97f-4344-8489-cfa0dbbaf99f-scripts\") pod \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\" (UID: \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\") " Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.987540 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/41b80a26-d97f-4344-8489-cfa0dbbaf99f-config-data\") pod \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\" (UID: \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\") " Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.990803 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41b80a26-d97f-4344-8489-cfa0dbbaf99f-logs\") pod \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\" (UID: \"41b80a26-d97f-4344-8489-cfa0dbbaf99f\") " Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.991608 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41b80a26-d97f-4344-8489-cfa0dbbaf99f-logs" (OuterVolumeSpecName: "logs") pod "41b80a26-d97f-4344-8489-cfa0dbbaf99f" (UID: "41b80a26-d97f-4344-8489-cfa0dbbaf99f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.994375 4687 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41b80a26-d97f-4344-8489-cfa0dbbaf99f-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:45 crc kubenswrapper[4687]: I1125 09:23:45.998669 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41b80a26-d97f-4344-8489-cfa0dbbaf99f-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "41b80a26-d97f-4344-8489-cfa0dbbaf99f" (UID: "41b80a26-d97f-4344-8489-cfa0dbbaf99f"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.013699 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41b80a26-d97f-4344-8489-cfa0dbbaf99f-kube-api-access-wd4sq" (OuterVolumeSpecName: "kube-api-access-wd4sq") pod "41b80a26-d97f-4344-8489-cfa0dbbaf99f" (UID: "41b80a26-d97f-4344-8489-cfa0dbbaf99f"). InnerVolumeSpecName "kube-api-access-wd4sq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.044901 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41b80a26-d97f-4344-8489-cfa0dbbaf99f-scripts" (OuterVolumeSpecName: "scripts") pod "41b80a26-d97f-4344-8489-cfa0dbbaf99f" (UID: "41b80a26-d97f-4344-8489-cfa0dbbaf99f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.046143 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41b80a26-d97f-4344-8489-cfa0dbbaf99f-config-data" (OuterVolumeSpecName: "config-data") pod "41b80a26-d97f-4344-8489-cfa0dbbaf99f" (UID: "41b80a26-d97f-4344-8489-cfa0dbbaf99f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.084942 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2b8071e8-54ac-4251-9604-d0dd6da182e2","Type":"ContainerStarted","Data":"f0beec2d869c8e34222ad9d9af9f0c268df6afd911d8c439a84429cd098f1f23"} Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.096115 4687 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/41b80a26-d97f-4344-8489-cfa0dbbaf99f-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.096143 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/41b80a26-d97f-4344-8489-cfa0dbbaf99f-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.096153 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/41b80a26-d97f-4344-8489-cfa0dbbaf99f-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.096161 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wd4sq\" (UniqueName: \"kubernetes.io/projected/41b80a26-d97f-4344-8489-cfa0dbbaf99f-kube-api-access-wd4sq\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.099932 4687 generic.go:334] "Generic (PLEG): container finished" podID="41b80a26-d97f-4344-8489-cfa0dbbaf99f" containerID="efe8191e8265cb34dbd18530c62eb2172d1ee33cb120ba637460a782929b2bbc" exitCode=137 Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.099969 4687 generic.go:334] "Generic (PLEG): container finished" podID="41b80a26-d97f-4344-8489-cfa0dbbaf99f" containerID="488eb876ed48587f3b0483046e9f10f4d55e0a74e3a75c3081de304ea303c4ac" exitCode=137 Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.100001 4687 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.100001 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5788b95877-j7l5l"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.100065 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5788b95877-j7l5l" event={"ID":"41b80a26-d97f-4344-8489-cfa0dbbaf99f","Type":"ContainerDied","Data":"efe8191e8265cb34dbd18530c62eb2172d1ee33cb120ba637460a782929b2bbc"}
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.100113 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5788b95877-j7l5l" event={"ID":"41b80a26-d97f-4344-8489-cfa0dbbaf99f","Type":"ContainerDied","Data":"488eb876ed48587f3b0483046e9f10f4d55e0a74e3a75c3081de304ea303c4ac"}
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.100133 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5788b95877-j7l5l" event={"ID":"41b80a26-d97f-4344-8489-cfa0dbbaf99f","Type":"ContainerDied","Data":"df28394b0e78e92fe9f4c377eb084c04147c19e0991a1ba4fd0bf42b74460586"}
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.100153 4687 scope.go:117] "RemoveContainer" containerID="efe8191e8265cb34dbd18530c62eb2172d1ee33cb120ba637460a782929b2bbc"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.114150 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420","Type":"ContainerStarted","Data":"4df5b583158852556117df61f66ea53428e9294804d920e916bbe4ec35360a66"}
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.115085 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.118251 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5b5c8fb8f9-zv6j6"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.122019 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" event={"ID":"a6ddcca5-c362-40ff-94ab-feb6330cc792","Type":"ContainerStarted","Data":"10cc38fe6b9696c688a29bf40f289e16aa12d51afd4b9613b42156ae1d77acce"}
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.122379 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6578955fd5-xg9r6"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.126108 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.409303049 podStartE2EDuration="4.126092637s" podCreationTimestamp="2025-11-25 09:23:42 +0000 UTC" firstStartedPulling="2025-11-25 09:23:43.039086263 +0000 UTC m=+1218.092725981" lastFinishedPulling="2025-11-25 09:23:43.755875851 +0000 UTC m=+1218.809515569" observedRunningTime="2025-11-25 09:23:46.111336204 +0000 UTC m=+1221.164975922" watchObservedRunningTime="2025-11-25 09:23:46.126092637 +0000 UTC m=+1221.179732355"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.126414 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-86977dc76f-6cpw4" Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.127881 4687 generic.go:334] "Generic (PLEG): container finished" podID="5df3d575-c461-43d1-af22-d9f15aaf06b7" containerID="17238923e26e3d2d96e4944b1bedd12f5c0810e0c9d223e23ed55444902e1ed6" exitCode=137 Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.127918 4687 generic.go:334] "Generic (PLEG): container finished" podID="5df3d575-c461-43d1-af22-d9f15aaf06b7" containerID="ea5c36b616c5e02c1869d3c22c1856893ebd5facf702a4e075dc9fcd64e5c0f7" exitCode=137 Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.127962 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5b5c8fb8f9-zv6j6" event={"ID":"5df3d575-c461-43d1-af22-d9f15aaf06b7","Type":"ContainerDied","Data":"17238923e26e3d2d96e4944b1bedd12f5c0810e0c9d223e23ed55444902e1ed6"} Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.127992 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5b5c8fb8f9-zv6j6" event={"ID":"5df3d575-c461-43d1-af22-d9f15aaf06b7","Type":"ContainerDied","Data":"ea5c36b616c5e02c1869d3c22c1856893ebd5facf702a4e075dc9fcd64e5c0f7"} Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.128005 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5b5c8fb8f9-zv6j6" event={"ID":"5df3d575-c461-43d1-af22-d9f15aaf06b7","Type":"ContainerDied","Data":"19d04e3b92d2ce26ee3566195dbdcb17347c98efcc0bd5078a5256a9019a6bf8"} Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.128064 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5b5c8fb8f9-zv6j6" Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.138936 4687 generic.go:334] "Generic (PLEG): container finished" podID="d875a4e8-0091-41cb-9a53-2cd74bdf853c" containerID="e20539092e82e4acacb5393fe7a05f3acee326479837f8e4e07cce2cc47e24b2" exitCode=137 Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.138980 4687 generic.go:334] "Generic (PLEG): container finished" podID="d875a4e8-0091-41cb-9a53-2cd74bdf853c" containerID="86a24efef2c514f47481598a270e082c67bdd7c83e5c9e1ef82328cb9e591d4d" exitCode=137 Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.139356 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-86977dc76f-6cpw4" Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.139365 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-86977dc76f-6cpw4" event={"ID":"d875a4e8-0091-41cb-9a53-2cd74bdf853c","Type":"ContainerDied","Data":"e20539092e82e4acacb5393fe7a05f3acee326479837f8e4e07cce2cc47e24b2"} Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.139751 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-86977dc76f-6cpw4" event={"ID":"d875a4e8-0091-41cb-9a53-2cd74bdf853c","Type":"ContainerDied","Data":"86a24efef2c514f47481598a270e082c67bdd7c83e5c9e1ef82328cb9e591d4d"} Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.139773 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-86977dc76f-6cpw4" event={"ID":"d875a4e8-0091-41cb-9a53-2cd74bdf853c","Type":"ContainerDied","Data":"73d9eeb28e45503f54a315a59e1ace2d9db6bf255d1625e97d2497b2e00220b3"} Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.157808 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.157784033 podStartE2EDuration="4.157784033s" podCreationTimestamp="2025-11-25 09:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:23:46.154573265 +0000 UTC m=+1221.208212973" watchObservedRunningTime="2025-11-25 09:23:46.157784033 +0000 UTC m=+1221.211423751" Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.186292 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.198761 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d875a4e8-0091-41cb-9a53-2cd74bdf853c-horizon-secret-key\") pod \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\" (UID: \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\") " Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.198829 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d875a4e8-0091-41cb-9a53-2cd74bdf853c-config-data\") pod \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\" (UID: \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\") " Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.198856 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nx9lg\" (UniqueName: \"kubernetes.io/projected/5df3d575-c461-43d1-af22-d9f15aaf06b7-kube-api-access-nx9lg\") pod \"5df3d575-c461-43d1-af22-d9f15aaf06b7\" (UID: \"5df3d575-c461-43d1-af22-d9f15aaf06b7\") " Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.198888 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8nzz\" (UniqueName: \"kubernetes.io/projected/d875a4e8-0091-41cb-9a53-2cd74bdf853c-kube-api-access-g8nzz\") pod \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\" (UID: \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\") " Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.199016 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5df3d575-c461-43d1-af22-d9f15aaf06b7-config-data\") pod \"5df3d575-c461-43d1-af22-d9f15aaf06b7\" (UID: \"5df3d575-c461-43d1-af22-d9f15aaf06b7\") " Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.199093 4687 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d875a4e8-0091-41cb-9a53-2cd74bdf853c-logs\") pod \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\" (UID: \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\") "
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.199168 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5df3d575-c461-43d1-af22-d9f15aaf06b7-logs\") pod \"5df3d575-c461-43d1-af22-d9f15aaf06b7\" (UID: \"5df3d575-c461-43d1-af22-d9f15aaf06b7\") "
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.199267 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d875a4e8-0091-41cb-9a53-2cd74bdf853c-scripts\") pod \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\" (UID: \"d875a4e8-0091-41cb-9a53-2cd74bdf853c\") "
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.199322 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5df3d575-c461-43d1-af22-d9f15aaf06b7-scripts\") pod \"5df3d575-c461-43d1-af22-d9f15aaf06b7\" (UID: \"5df3d575-c461-43d1-af22-d9f15aaf06b7\") "
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.199371 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/5df3d575-c461-43d1-af22-d9f15aaf06b7-horizon-secret-key\") pod \"5df3d575-c461-43d1-af22-d9f15aaf06b7\" (UID: \"5df3d575-c461-43d1-af22-d9f15aaf06b7\") "
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.202422 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5788b95877-j7l5l"]
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.206760 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d875a4e8-0091-41cb-9a53-2cd74bdf853c-logs" (OuterVolumeSpecName: "logs") pod "d875a4e8-0091-41cb-9a53-2cd74bdf853c" (UID: "d875a4e8-0091-41cb-9a53-2cd74bdf853c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.206970 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5df3d575-c461-43d1-af22-d9f15aaf06b7-logs" (OuterVolumeSpecName: "logs") pod "5df3d575-c461-43d1-af22-d9f15aaf06b7" (UID: "5df3d575-c461-43d1-af22-d9f15aaf06b7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.209434 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d875a4e8-0091-41cb-9a53-2cd74bdf853c-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "d875a4e8-0091-41cb-9a53-2cd74bdf853c" (UID: "d875a4e8-0091-41cb-9a53-2cd74bdf853c"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.209872 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d875a4e8-0091-41cb-9a53-2cd74bdf853c-kube-api-access-g8nzz" (OuterVolumeSpecName: "kube-api-access-g8nzz") pod "d875a4e8-0091-41cb-9a53-2cd74bdf853c" (UID: "d875a4e8-0091-41cb-9a53-2cd74bdf853c"). InnerVolumeSpecName "kube-api-access-g8nzz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.211006 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5df3d575-c461-43d1-af22-d9f15aaf06b7-kube-api-access-nx9lg" (OuterVolumeSpecName: "kube-api-access-nx9lg") pod "5df3d575-c461-43d1-af22-d9f15aaf06b7" (UID: "5df3d575-c461-43d1-af22-d9f15aaf06b7"). InnerVolumeSpecName "kube-api-access-nx9lg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.212852 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5788b95877-j7l5l"]
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.219860 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" podStartSLOduration=4.219838958 podStartE2EDuration="4.219838958s" podCreationTimestamp="2025-11-25 09:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:23:46.201932258 +0000 UTC m=+1221.255571986" watchObservedRunningTime="2025-11-25 09:23:46.219838958 +0000 UTC m=+1221.273478676"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.227779 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5df3d575-c461-43d1-af22-d9f15aaf06b7-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "5df3d575-c461-43d1-af22-d9f15aaf06b7" (UID: "5df3d575-c461-43d1-af22-d9f15aaf06b7"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.251275 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d875a4e8-0091-41cb-9a53-2cd74bdf853c-scripts" (OuterVolumeSpecName: "scripts") pod "d875a4e8-0091-41cb-9a53-2cd74bdf853c" (UID: "d875a4e8-0091-41cb-9a53-2cd74bdf853c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.252810 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5df3d575-c461-43d1-af22-d9f15aaf06b7-config-data" (OuterVolumeSpecName: "config-data") pod "5df3d575-c461-43d1-af22-d9f15aaf06b7" (UID: "5df3d575-c461-43d1-af22-d9f15aaf06b7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.260209 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5df3d575-c461-43d1-af22-d9f15aaf06b7-scripts" (OuterVolumeSpecName: "scripts") pod "5df3d575-c461-43d1-af22-d9f15aaf06b7" (UID: "5df3d575-c461-43d1-af22-d9f15aaf06b7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.263929 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d875a4e8-0091-41cb-9a53-2cd74bdf853c-config-data" (OuterVolumeSpecName: "config-data") pod "d875a4e8-0091-41cb-9a53-2cd74bdf853c" (UID: "d875a4e8-0091-41cb-9a53-2cd74bdf853c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.301185 4687 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5df3d575-c461-43d1-af22-d9f15aaf06b7-logs\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.301401 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d875a4e8-0091-41cb-9a53-2cd74bdf853c-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.301466 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5df3d575-c461-43d1-af22-d9f15aaf06b7-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.301584 4687 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/5df3d575-c461-43d1-af22-d9f15aaf06b7-horizon-secret-key\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.301644 4687 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d875a4e8-0091-41cb-9a53-2cd74bdf853c-horizon-secret-key\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.301694 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d875a4e8-0091-41cb-9a53-2cd74bdf853c-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.301746 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nx9lg\" (UniqueName: \"kubernetes.io/projected/5df3d575-c461-43d1-af22-d9f15aaf06b7-kube-api-access-nx9lg\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.301805 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8nzz\" (UniqueName: \"kubernetes.io/projected/d875a4e8-0091-41cb-9a53-2cd74bdf853c-kube-api-access-g8nzz\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.301895 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5df3d575-c461-43d1-af22-d9f15aaf06b7-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.301946 4687 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d875a4e8-0091-41cb-9a53-2cd74bdf853c-logs\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.309311 4687 scope.go:117] "RemoveContainer" containerID="488eb876ed48587f3b0483046e9f10f4d55e0a74e3a75c3081de304ea303c4ac"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.350287 4687 scope.go:117] "RemoveContainer" containerID="efe8191e8265cb34dbd18530c62eb2172d1ee33cb120ba637460a782929b2bbc"
Nov 25 09:23:46 crc kubenswrapper[4687]: E1125 09:23:46.351343 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"efe8191e8265cb34dbd18530c62eb2172d1ee33cb120ba637460a782929b2bbc\": container with ID starting with efe8191e8265cb34dbd18530c62eb2172d1ee33cb120ba637460a782929b2bbc not found: ID does not exist" containerID="efe8191e8265cb34dbd18530c62eb2172d1ee33cb120ba637460a782929b2bbc"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.351378 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"efe8191e8265cb34dbd18530c62eb2172d1ee33cb120ba637460a782929b2bbc"} err="failed to get container status \"efe8191e8265cb34dbd18530c62eb2172d1ee33cb120ba637460a782929b2bbc\": rpc error: code = NotFound desc = could not find container \"efe8191e8265cb34dbd18530c62eb2172d1ee33cb120ba637460a782929b2bbc\": container with ID starting with efe8191e8265cb34dbd18530c62eb2172d1ee33cb120ba637460a782929b2bbc not found: ID does not exist"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.351406 4687 scope.go:117] "RemoveContainer" containerID="488eb876ed48587f3b0483046e9f10f4d55e0a74e3a75c3081de304ea303c4ac"
Nov 25 09:23:46 crc kubenswrapper[4687]: E1125 09:23:46.351706 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"488eb876ed48587f3b0483046e9f10f4d55e0a74e3a75c3081de304ea303c4ac\": container with ID starting with 488eb876ed48587f3b0483046e9f10f4d55e0a74e3a75c3081de304ea303c4ac not found: ID does not exist" containerID="488eb876ed48587f3b0483046e9f10f4d55e0a74e3a75c3081de304ea303c4ac"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.351732 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"488eb876ed48587f3b0483046e9f10f4d55e0a74e3a75c3081de304ea303c4ac"} err="failed to get container status \"488eb876ed48587f3b0483046e9f10f4d55e0a74e3a75c3081de304ea303c4ac\": rpc error: code = NotFound desc = could not find container \"488eb876ed48587f3b0483046e9f10f4d55e0a74e3a75c3081de304ea303c4ac\": container with ID starting with 488eb876ed48587f3b0483046e9f10f4d55e0a74e3a75c3081de304ea303c4ac not found: ID does not exist"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.351746 4687 scope.go:117] "RemoveContainer" containerID="efe8191e8265cb34dbd18530c62eb2172d1ee33cb120ba637460a782929b2bbc"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.352433 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"efe8191e8265cb34dbd18530c62eb2172d1ee33cb120ba637460a782929b2bbc"} err="failed to get container status \"efe8191e8265cb34dbd18530c62eb2172d1ee33cb120ba637460a782929b2bbc\": rpc error: code = NotFound desc = could not find container \"efe8191e8265cb34dbd18530c62eb2172d1ee33cb120ba637460a782929b2bbc\": container with ID starting with efe8191e8265cb34dbd18530c62eb2172d1ee33cb120ba637460a782929b2bbc not found: ID does not exist"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.352513 4687 scope.go:117] "RemoveContainer" containerID="488eb876ed48587f3b0483046e9f10f4d55e0a74e3a75c3081de304ea303c4ac"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.352838 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"488eb876ed48587f3b0483046e9f10f4d55e0a74e3a75c3081de304ea303c4ac"} err="failed to get container status \"488eb876ed48587f3b0483046e9f10f4d55e0a74e3a75c3081de304ea303c4ac\": rpc error: code = NotFound desc = could not find container \"488eb876ed48587f3b0483046e9f10f4d55e0a74e3a75c3081de304ea303c4ac\": container with ID starting with 488eb876ed48587f3b0483046e9f10f4d55e0a74e3a75c3081de304ea303c4ac not found: ID does not exist"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.352888 4687 scope.go:117] "RemoveContainer" containerID="17238923e26e3d2d96e4944b1bedd12f5c0810e0c9d223e23ed55444902e1ed6"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.556288 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5b5c8fb8f9-zv6j6"]
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.566533 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5b5c8fb8f9-zv6j6"]
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.570595 4687 scope.go:117] "RemoveContainer" containerID="ea5c36b616c5e02c1869d3c22c1856893ebd5facf702a4e075dc9fcd64e5c0f7"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.577638 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-86977dc76f-6cpw4"]
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.585554 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-86977dc76f-6cpw4"]
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.595687 4687 scope.go:117] "RemoveContainer" containerID="17238923e26e3d2d96e4944b1bedd12f5c0810e0c9d223e23ed55444902e1ed6"
Nov 25 09:23:46 crc kubenswrapper[4687]: E1125 09:23:46.596100 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17238923e26e3d2d96e4944b1bedd12f5c0810e0c9d223e23ed55444902e1ed6\": container with ID starting with 17238923e26e3d2d96e4944b1bedd12f5c0810e0c9d223e23ed55444902e1ed6 not found: ID does not exist" containerID="17238923e26e3d2d96e4944b1bedd12f5c0810e0c9d223e23ed55444902e1ed6"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.596135 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17238923e26e3d2d96e4944b1bedd12f5c0810e0c9d223e23ed55444902e1ed6"} err="failed to get container status \"17238923e26e3d2d96e4944b1bedd12f5c0810e0c9d223e23ed55444902e1ed6\": rpc error: code = NotFound desc = could not find container \"17238923e26e3d2d96e4944b1bedd12f5c0810e0c9d223e23ed55444902e1ed6\": container with ID starting with 17238923e26e3d2d96e4944b1bedd12f5c0810e0c9d223e23ed55444902e1ed6 not found: ID does not exist"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.596163 4687 scope.go:117] "RemoveContainer" containerID="ea5c36b616c5e02c1869d3c22c1856893ebd5facf702a4e075dc9fcd64e5c0f7"
Nov 25 09:23:46 crc kubenswrapper[4687]: E1125 09:23:46.596530 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea5c36b616c5e02c1869d3c22c1856893ebd5facf702a4e075dc9fcd64e5c0f7\": container with ID starting with ea5c36b616c5e02c1869d3c22c1856893ebd5facf702a4e075dc9fcd64e5c0f7 not found: ID does not exist" containerID="ea5c36b616c5e02c1869d3c22c1856893ebd5facf702a4e075dc9fcd64e5c0f7"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.596558 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea5c36b616c5e02c1869d3c22c1856893ebd5facf702a4e075dc9fcd64e5c0f7"} err="failed to get container status \"ea5c36b616c5e02c1869d3c22c1856893ebd5facf702a4e075dc9fcd64e5c0f7\": rpc error: code = NotFound desc = could not find container \"ea5c36b616c5e02c1869d3c22c1856893ebd5facf702a4e075dc9fcd64e5c0f7\": container with ID starting with ea5c36b616c5e02c1869d3c22c1856893ebd5facf702a4e075dc9fcd64e5c0f7 not found: ID does not exist"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.596575 4687 scope.go:117] "RemoveContainer" containerID="17238923e26e3d2d96e4944b1bedd12f5c0810e0c9d223e23ed55444902e1ed6"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.596819 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17238923e26e3d2d96e4944b1bedd12f5c0810e0c9d223e23ed55444902e1ed6"} err="failed to get container status \"17238923e26e3d2d96e4944b1bedd12f5c0810e0c9d223e23ed55444902e1ed6\": rpc error: code = NotFound desc = could not find container \"17238923e26e3d2d96e4944b1bedd12f5c0810e0c9d223e23ed55444902e1ed6\": container with ID starting with 17238923e26e3d2d96e4944b1bedd12f5c0810e0c9d223e23ed55444902e1ed6 not found: ID does not exist"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.596840 4687 scope.go:117] "RemoveContainer" containerID="ea5c36b616c5e02c1869d3c22c1856893ebd5facf702a4e075dc9fcd64e5c0f7"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.597041 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea5c36b616c5e02c1869d3c22c1856893ebd5facf702a4e075dc9fcd64e5c0f7"} err="failed to get container status \"ea5c36b616c5e02c1869d3c22c1856893ebd5facf702a4e075dc9fcd64e5c0f7\": rpc error: code = NotFound desc = could not find container \"ea5c36b616c5e02c1869d3c22c1856893ebd5facf702a4e075dc9fcd64e5c0f7\": container with ID starting with ea5c36b616c5e02c1869d3c22c1856893ebd5facf702a4e075dc9fcd64e5c0f7 not found: ID does not exist"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.597062 4687 scope.go:117] "RemoveContainer" containerID="e20539092e82e4acacb5393fe7a05f3acee326479837f8e4e07cce2cc47e24b2"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.787332 4687 scope.go:117] "RemoveContainer" containerID="86a24efef2c514f47481598a270e082c67bdd7c83e5c9e1ef82328cb9e591d4d"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.812107 4687 scope.go:117] "RemoveContainer" containerID="e20539092e82e4acacb5393fe7a05f3acee326479837f8e4e07cce2cc47e24b2"
Nov 25 09:23:46 crc kubenswrapper[4687]: E1125 09:23:46.812582 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e20539092e82e4acacb5393fe7a05f3acee326479837f8e4e07cce2cc47e24b2\": container with ID starting with e20539092e82e4acacb5393fe7a05f3acee326479837f8e4e07cce2cc47e24b2 not found: ID does not exist" containerID="e20539092e82e4acacb5393fe7a05f3acee326479837f8e4e07cce2cc47e24b2"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.812642 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e20539092e82e4acacb5393fe7a05f3acee326479837f8e4e07cce2cc47e24b2"} err="failed to get container status \"e20539092e82e4acacb5393fe7a05f3acee326479837f8e4e07cce2cc47e24b2\": rpc error: code = NotFound desc = could not find container \"e20539092e82e4acacb5393fe7a05f3acee326479837f8e4e07cce2cc47e24b2\": container with ID starting with e20539092e82e4acacb5393fe7a05f3acee326479837f8e4e07cce2cc47e24b2 not found: ID does not exist"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.812675 4687 scope.go:117] "RemoveContainer" containerID="86a24efef2c514f47481598a270e082c67bdd7c83e5c9e1ef82328cb9e591d4d"
Nov 25 09:23:46 crc kubenswrapper[4687]: E1125 09:23:46.813039 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86a24efef2c514f47481598a270e082c67bdd7c83e5c9e1ef82328cb9e591d4d\": container with ID starting with 86a24efef2c514f47481598a270e082c67bdd7c83e5c9e1ef82328cb9e591d4d not found: ID does not exist" containerID="86a24efef2c514f47481598a270e082c67bdd7c83e5c9e1ef82328cb9e591d4d"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.813063 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86a24efef2c514f47481598a270e082c67bdd7c83e5c9e1ef82328cb9e591d4d"} err="failed to get container status \"86a24efef2c514f47481598a270e082c67bdd7c83e5c9e1ef82328cb9e591d4d\": rpc error: code = NotFound desc = could not find container \"86a24efef2c514f47481598a270e082c67bdd7c83e5c9e1ef82328cb9e591d4d\": container with ID starting with 86a24efef2c514f47481598a270e082c67bdd7c83e5c9e1ef82328cb9e591d4d not found: ID does not exist"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.813079 4687 scope.go:117] "RemoveContainer" containerID="e20539092e82e4acacb5393fe7a05f3acee326479837f8e4e07cce2cc47e24b2"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.813664 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e20539092e82e4acacb5393fe7a05f3acee326479837f8e4e07cce2cc47e24b2"} err="failed to get container status \"e20539092e82e4acacb5393fe7a05f3acee326479837f8e4e07cce2cc47e24b2\": rpc error: code = NotFound desc = could not find container \"e20539092e82e4acacb5393fe7a05f3acee326479837f8e4e07cce2cc47e24b2\": container with ID starting with e20539092e82e4acacb5393fe7a05f3acee326479837f8e4e07cce2cc47e24b2 not found: ID does not exist"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.813738 4687 scope.go:117] "RemoveContainer" containerID="86a24efef2c514f47481598a270e082c67bdd7c83e5c9e1ef82328cb9e591d4d"
Nov 25 09:23:46 crc kubenswrapper[4687]: I1125 09:23:46.814025 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86a24efef2c514f47481598a270e082c67bdd7c83e5c9e1ef82328cb9e591d4d"} err="failed to get container status \"86a24efef2c514f47481598a270e082c67bdd7c83e5c9e1ef82328cb9e591d4d\": rpc error: code = NotFound desc = could not find container \"86a24efef2c514f47481598a270e082c67bdd7c83e5c9e1ef82328cb9e591d4d\": container with ID starting with 86a24efef2c514f47481598a270e082c67bdd7c83e5c9e1ef82328cb9e591d4d not found: ID does not exist"
Nov 25 09:23:47 crc kubenswrapper[4687]: I1125 09:23:47.612360 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Nov 25 09:23:47 crc kubenswrapper[4687]: I1125 09:23:47.755216 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41b80a26-d97f-4344-8489-cfa0dbbaf99f" path="/var/lib/kubelet/pods/41b80a26-d97f-4344-8489-cfa0dbbaf99f/volumes"
Nov 25 09:23:47 crc kubenswrapper[4687]: I1125 09:23:47.757158 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5df3d575-c461-43d1-af22-d9f15aaf06b7" path="/var/lib/kubelet/pods/5df3d575-c461-43d1-af22-d9f15aaf06b7/volumes"
Nov 25 09:23:47 crc kubenswrapper[4687]: I1125 09:23:47.757896 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d875a4e8-0091-41cb-9a53-2cd74bdf853c" path="/var/lib/kubelet/pods/d875a4e8-0091-41cb-9a53-2cd74bdf853c/volumes"
Nov 25 09:23:48 crc kubenswrapper[4687]: I1125 09:23:48.179274 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="87cc15f6-7b3f-4e0f-b94a-83b79eb1c420" containerName="cinder-api-log" containerID="cri-o://228c140127f40b55adc70328d5a7dea612a2bb1d580239a78306d359ef96a17c" gracePeriod=30
Nov 25 09:23:48 crc kubenswrapper[4687]: I1125 09:23:48.179625 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="87cc15f6-7b3f-4e0f-b94a-83b79eb1c420" containerName="cinder-api" containerID="cri-o://4df5b583158852556117df61f66ea53428e9294804d920e916bbe4ec35360a66" gracePeriod=30
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.190650 4687 generic.go:334] "Generic (PLEG): container finished" podID="87cc15f6-7b3f-4e0f-b94a-83b79eb1c420" containerID="4df5b583158852556117df61f66ea53428e9294804d920e916bbe4ec35360a66" exitCode=0
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.190903 4687 generic.go:334] "Generic (PLEG): container finished" podID="87cc15f6-7b3f-4e0f-b94a-83b79eb1c420" containerID="228c140127f40b55adc70328d5a7dea612a2bb1d580239a78306d359ef96a17c" exitCode=143
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.190924 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420","Type":"ContainerDied","Data":"4df5b583158852556117df61f66ea53428e9294804d920e916bbe4ec35360a66"}
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.190947 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420","Type":"ContainerDied","Data":"228c140127f40b55adc70328d5a7dea612a2bb1d580239a78306d359ef96a17c"}
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.198259 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-7dc8446cb-d6wz7"
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.273286 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-69b7bcc78d-r6t7q"
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.490165 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.575897 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-config-data\") pod \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") "
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.576036 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-config-data-custom\") pod \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") "
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.576110 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-scripts\") pod \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") "
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.576149 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-etc-machine-id\") pod \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") "
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.576286 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fwzvt\" (UniqueName: \"kubernetes.io/projected/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-kube-api-access-fwzvt\") pod \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") "
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.576326 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-logs\") pod \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") "
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.576376 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-combined-ca-bundle\") pod \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\" (UID: \"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420\") "
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.576659 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-logs" (OuterVolumeSpecName: "logs") pod "87cc15f6-7b3f-4e0f-b94a-83b79eb1c420" (UID: "87cc15f6-7b3f-4e0f-b94a-83b79eb1c420"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.576454 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "87cc15f6-7b3f-4e0f-b94a-83b79eb1c420" (UID: "87cc15f6-7b3f-4e0f-b94a-83b79eb1c420"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.576976 4687 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-logs\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.582819 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-kube-api-access-fwzvt" (OuterVolumeSpecName: "kube-api-access-fwzvt") pod "87cc15f6-7b3f-4e0f-b94a-83b79eb1c420" (UID: "87cc15f6-7b3f-4e0f-b94a-83b79eb1c420"). InnerVolumeSpecName "kube-api-access-fwzvt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.584251 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "87cc15f6-7b3f-4e0f-b94a-83b79eb1c420" (UID: "87cc15f6-7b3f-4e0f-b94a-83b79eb1c420"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.609780 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-scripts" (OuterVolumeSpecName: "scripts") pod "87cc15f6-7b3f-4e0f-b94a-83b79eb1c420" (UID: "87cc15f6-7b3f-4e0f-b94a-83b79eb1c420"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.636403 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "87cc15f6-7b3f-4e0f-b94a-83b79eb1c420" (UID: "87cc15f6-7b3f-4e0f-b94a-83b79eb1c420"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.680074 4687 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.680123 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.680138 4687 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-etc-machine-id\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.680151 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fwzvt\" (UniqueName: \"kubernetes.io/projected/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-kube-api-access-fwzvt\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.680166 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.698650 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-config-data" (OuterVolumeSpecName: "config-data") pod "87cc15f6-7b3f-4e0f-b94a-83b79eb1c420" (UID: "87cc15f6-7b3f-4e0f-b94a-83b79eb1c420"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:23:49 crc kubenswrapper[4687]: I1125 09:23:49.780975 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.201158 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"87cc15f6-7b3f-4e0f-b94a-83b79eb1c420","Type":"ContainerDied","Data":"94baf5477978c8425698a6ed27bf3c87f81b44e5b8dab0fa4790a4036705c0e0"}
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.201209 4687 scope.go:117] "RemoveContainer" containerID="4df5b583158852556117df61f66ea53428e9294804d920e916bbe4ec35360a66"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.201344 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.249345 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.249877 4687 scope.go:117] "RemoveContainer" containerID="228c140127f40b55adc70328d5a7dea612a2bb1d580239a78306d359ef96a17c"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.276540 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"]
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.289555 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Nov 25 09:23:50 crc kubenswrapper[4687]: E1125 09:23:50.289914 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d875a4e8-0091-41cb-9a53-2cd74bdf853c" containerName="horizon-log"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.289925 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d875a4e8-0091-41cb-9a53-2cd74bdf853c" containerName="horizon-log"
Nov 25 09:23:50 crc kubenswrapper[4687]: E1125 09:23:50.289941 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41b80a26-d97f-4344-8489-cfa0dbbaf99f" containerName="horizon-log"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.289947 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="41b80a26-d97f-4344-8489-cfa0dbbaf99f" containerName="horizon-log"
Nov 25 09:23:50 crc kubenswrapper[4687]: E1125 09:23:50.289955 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5df3d575-c461-43d1-af22-d9f15aaf06b7" containerName="horizon"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.289961 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="5df3d575-c461-43d1-af22-d9f15aaf06b7" containerName="horizon"
Nov 25 09:23:50 crc kubenswrapper[4687]: E1125 09:23:50.289971 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87cc15f6-7b3f-4e0f-b94a-83b79eb1c420" containerName="cinder-api"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.289976 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="87cc15f6-7b3f-4e0f-b94a-83b79eb1c420" containerName="cinder-api"
Nov 25 09:23:50 crc kubenswrapper[4687]: E1125 09:23:50.289986 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87cc15f6-7b3f-4e0f-b94a-83b79eb1c420" containerName="cinder-api-log"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.289992 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="87cc15f6-7b3f-4e0f-b94a-83b79eb1c420" containerName="cinder-api-log"
Nov 25 09:23:50 crc kubenswrapper[4687]: E1125 09:23:50.290009 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef9042c2-4451-4ab0-9197-6097ec01cd56" containerName="dnsmasq-dns"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.290014 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef9042c2-4451-4ab0-9197-6097ec01cd56" containerName="dnsmasq-dns"
Nov 25 09:23:50 crc kubenswrapper[4687]: E1125 09:23:50.290024 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41b80a26-d97f-4344-8489-cfa0dbbaf99f" containerName="horizon"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.290030 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="41b80a26-d97f-4344-8489-cfa0dbbaf99f" containerName="horizon"
Nov 25 09:23:50 crc kubenswrapper[4687]: E1125 09:23:50.290040 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5df3d575-c461-43d1-af22-d9f15aaf06b7" containerName="horizon-log"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.290047 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="5df3d575-c461-43d1-af22-d9f15aaf06b7" containerName="horizon-log"
Nov 25 09:23:50 crc kubenswrapper[4687]: E1125 09:23:50.290059 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d875a4e8-0091-41cb-9a53-2cd74bdf853c" containerName="horizon"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.290065 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d875a4e8-0091-41cb-9a53-2cd74bdf853c" containerName="horizon"
Nov 25 09:23:50 crc kubenswrapper[4687]: E1125 09:23:50.290076 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef9042c2-4451-4ab0-9197-6097ec01cd56" containerName="init"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.290082 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef9042c2-4451-4ab0-9197-6097ec01cd56" containerName="init"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.290238 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="41b80a26-d97f-4344-8489-cfa0dbbaf99f" containerName="horizon-log"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.290253 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="41b80a26-d97f-4344-8489-cfa0dbbaf99f" containerName="horizon"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.290265 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="87cc15f6-7b3f-4e0f-b94a-83b79eb1c420" containerName="cinder-api-log"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.290273 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="5df3d575-c461-43d1-af22-d9f15aaf06b7" containerName="horizon"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.290285 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef9042c2-4451-4ab0-9197-6097ec01cd56" containerName="dnsmasq-dns"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.290296 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="87cc15f6-7b3f-4e0f-b94a-83b79eb1c420" containerName="cinder-api"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.290307 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="5df3d575-c461-43d1-af22-d9f15aaf06b7" containerName="horizon-log"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.290314 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="d875a4e8-0091-41cb-9a53-2cd74bdf853c" containerName="horizon-log"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.290323 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="d875a4e8-0091-41cb-9a53-2cd74bdf853c" containerName="horizon"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.291263 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.298111 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.298376 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.298482 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.303604 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.492167 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc9b51e3-3417-4e5d-86f6-2322c956f540-public-tls-certs\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.492224 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc9b51e3-3417-4e5d-86f6-2322c956f540-scripts\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.492255 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kd4q\" (UniqueName: \"kubernetes.io/projected/bc9b51e3-3417-4e5d-86f6-2322c956f540-kube-api-access-2kd4q\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.492317 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc9b51e3-3417-4e5d-86f6-2322c956f540-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.492393 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bc9b51e3-3417-4e5d-86f6-2322c956f540-etc-machine-id\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.492464 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bc9b51e3-3417-4e5d-86f6-2322c956f540-config-data-custom\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.492571 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc9b51e3-3417-4e5d-86f6-2322c956f540-config-data\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.492604 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc9b51e3-3417-4e5d-86f6-2322c956f540-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.492632 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc9b51e3-3417-4e5d-86f6-2322c956f540-logs\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.593947 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc9b51e3-3417-4e5d-86f6-2322c956f540-config-data\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.594000 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc9b51e3-3417-4e5d-86f6-2322c956f540-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.594684 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc9b51e3-3417-4e5d-86f6-2322c956f540-logs\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.594774 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc9b51e3-3417-4e5d-86f6-2322c956f540-public-tls-certs\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.594809 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc9b51e3-3417-4e5d-86f6-2322c956f540-scripts\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.594843 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kd4q\" (UniqueName: \"kubernetes.io/projected/bc9b51e3-3417-4e5d-86f6-2322c956f540-kube-api-access-2kd4q\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.594936 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc9b51e3-3417-4e5d-86f6-2322c956f540-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.594985 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bc9b51e3-3417-4e5d-86f6-2322c956f540-etc-machine-id\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.595058 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bc9b51e3-3417-4e5d-86f6-2322c956f540-config-data-custom\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.595664 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc9b51e3-3417-4e5d-86f6-2322c956f540-logs\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.596161 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bc9b51e3-3417-4e5d-86f6-2322c956f540-etc-machine-id\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.601345 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc9b51e3-3417-4e5d-86f6-2322c956f540-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.602173 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bc9b51e3-3417-4e5d-86f6-2322c956f540-config-data-custom\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.603245 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc9b51e3-3417-4e5d-86f6-2322c956f540-config-data\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.603888 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc9b51e3-3417-4e5d-86f6-2322c956f540-scripts\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.608930 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc9b51e3-3417-4e5d-86f6-2322c956f540-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.609140 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc9b51e3-3417-4e5d-86f6-2322c956f540-public-tls-certs\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.627986 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kd4q\" (UniqueName: \"kubernetes.io/projected/bc9b51e3-3417-4e5d-86f6-2322c956f540-kube-api-access-2kd4q\") pod \"cinder-api-0\" (UID: \"bc9b51e3-3417-4e5d-86f6-2322c956f540\") " pod="openstack/cinder-api-0"
Nov 25 09:23:50 crc kubenswrapper[4687]: I1125 09:23:50.631902 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 25 09:23:51 crc kubenswrapper[4687]: I1125 09:23:51.140337 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-7dc8446cb-d6wz7"
Nov 25 09:23:51 crc kubenswrapper[4687]: I1125 09:23:51.156610 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-69b7bcc78d-r6t7q"
Nov 25 09:23:51 crc kubenswrapper[4687]: I1125 09:23:51.221402 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 25 09:23:51 crc kubenswrapper[4687]: I1125 09:23:51.238688 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7dc8446cb-d6wz7"]
Nov 25 09:23:51 crc kubenswrapper[4687]: I1125 09:23:51.238962 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7dc8446cb-d6wz7" podUID="ea89f490-5a54-46eb-9b1c-1eb96dd181da" containerName="horizon-log" containerID="cri-o://104c046087fa3bf6c2aea654a6dd6e82a67ada49e25b5ccd82128193ef494c66" gracePeriod=30
Nov 25 09:23:51 crc kubenswrapper[4687]: I1125 09:23:51.239334 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7dc8446cb-d6wz7" podUID="ea89f490-5a54-46eb-9b1c-1eb96dd181da" containerName="horizon" containerID="cri-o://de6dd6a7ef780dbd15e6c80b510b13d7a645a96dcdbc08bfde45bbfa30791aeb" gracePeriod=30
Nov 25 09:23:51 crc kubenswrapper[4687]: I1125 09:23:51.750771 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cc15f6-7b3f-4e0f-b94a-83b79eb1c420" path="/var/lib/kubelet/pods/87cc15f6-7b3f-4e0f-b94a-83b79eb1c420/volumes"
Nov 25 09:23:52 crc kubenswrapper[4687]: I1125 09:23:52.102089 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-74498bff74-qgdp6"
Nov 25 09:23:52 crc kubenswrapper[4687]: I1125 09:23:52.124354 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-74498bff74-qgdp6"
Nov 25 09:23:52 crc kubenswrapper[4687]: I1125 09:23:52.235485 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"bc9b51e3-3417-4e5d-86f6-2322c956f540","Type":"ContainerStarted","Data":"3bd83219b1a6fdc42ee93e43dad7532b24a27046189a10fc436f91ad1cb1f6bf"}
Nov 25 09:23:52 crc kubenswrapper[4687]: I1125 09:23:52.235549 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"bc9b51e3-3417-4e5d-86f6-2322c956f540","Type":"ContainerStarted","Data":"1f31b477ebeef08400d2c0e100b9e6bb6058e453d990520e064f2bff60f1c5c8"}
Nov 25 09:23:52 crc kubenswrapper[4687]: I1125 09:23:52.753552 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6578955fd5-xg9r6"
Nov 25 09:23:52 crc kubenswrapper[4687]: I1125 09:23:52.825832 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-4q4h4"]
Nov 25 09:23:52 crc kubenswrapper[4687]: I1125 09:23:52.826066 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" podUID="b62e6ed0-d60b-46ea-b90e-a299f8b980cc" containerName="dnsmasq-dns" containerID="cri-o://63afb34fc959015c64d47b479eee835214c16ad75d3007efb2b744e6936d461d" gracePeriod=10
Nov 25 09:23:52 crc kubenswrapper[4687]: I1125 09:23:52.994996 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.046446 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.251973 4687 generic.go:334] "Generic (PLEG): container finished" podID="b62e6ed0-d60b-46ea-b90e-a299f8b980cc" containerID="63afb34fc959015c64d47b479eee835214c16ad75d3007efb2b744e6936d461d" exitCode=0
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.252029 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" event={"ID":"b62e6ed0-d60b-46ea-b90e-a299f8b980cc","Type":"ContainerDied","Data":"63afb34fc959015c64d47b479eee835214c16ad75d3007efb2b744e6936d461d"}
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.255181 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="2b8071e8-54ac-4251-9604-d0dd6da182e2" containerName="cinder-scheduler" containerID="cri-o://2b4765c45fc7aab56d824a7cba4a97df31de41b6c1ca1235ce2ad2d566fbdcc5" gracePeriod=30
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.255998 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"bc9b51e3-3417-4e5d-86f6-2322c956f540","Type":"ContainerStarted","Data":"698f4c983d6900217da3ebc0acc17680b61b396ebb26e1232b20e742df7010e7"}
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.256264 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="2b8071e8-54ac-4251-9604-d0dd6da182e2" containerName="probe" containerID="cri-o://f0beec2d869c8e34222ad9d9af9f0c268df6afd911d8c439a84429cd098f1f23" gracePeriod=30
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.256700 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.283257 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.2832195459999998 podStartE2EDuration="3.283219546s" podCreationTimestamp="2025-11-25 09:23:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:23:53.282519676 +0000 UTC m=+1228.336159384" watchObservedRunningTime="2025-11-25 09:23:53.283219546 +0000 UTC m=+1228.336859274"
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.703939 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-4q4h4"
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.806352 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-ovsdbserver-nb\") pod \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") "
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.806394 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-config\") pod \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") "
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.806417 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-llrgt\" (UniqueName: \"kubernetes.io/projected/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-kube-api-access-llrgt\") pod \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") "
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.806534 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-ovsdbserver-sb\") pod \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") "
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.806609 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-dns-swift-storage-0\") pod \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") "
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.806668 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-dns-svc\") pod \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\" (UID: \"b62e6ed0-d60b-46ea-b90e-a299f8b980cc\") "
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.813182 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-kube-api-access-llrgt" (OuterVolumeSpecName: "kube-api-access-llrgt") pod "b62e6ed0-d60b-46ea-b90e-a299f8b980cc" (UID: "b62e6ed0-d60b-46ea-b90e-a299f8b980cc"). InnerVolumeSpecName "kube-api-access-llrgt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.844578 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.844654 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.875678 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b62e6ed0-d60b-46ea-b90e-a299f8b980cc" (UID: "b62e6ed0-d60b-46ea-b90e-a299f8b980cc"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.883457 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b62e6ed0-d60b-46ea-b90e-a299f8b980cc" (UID: "b62e6ed0-d60b-46ea-b90e-a299f8b980cc"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.891939 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-config" (OuterVolumeSpecName: "config") pod "b62e6ed0-d60b-46ea-b90e-a299f8b980cc" (UID: "b62e6ed0-d60b-46ea-b90e-a299f8b980cc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.894552 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b62e6ed0-d60b-46ea-b90e-a299f8b980cc" (UID: "b62e6ed0-d60b-46ea-b90e-a299f8b980cc"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.902900 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b62e6ed0-d60b-46ea-b90e-a299f8b980cc" (UID: "b62e6ed0-d60b-46ea-b90e-a299f8b980cc"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.909169 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.909236 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.909253 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-llrgt\" (UniqueName: \"kubernetes.io/projected/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-kube-api-access-llrgt\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.909266 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.909277 4687 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:53 crc kubenswrapper[4687]: I1125 09:23:53.909306 4687 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b62e6ed0-d60b-46ea-b90e-a299f8b980cc-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:54 crc kubenswrapper[4687]: I1125 09:23:54.267139 4687 generic.go:334] "Generic (PLEG): container finished" podID="2b8071e8-54ac-4251-9604-d0dd6da182e2" containerID="f0beec2d869c8e34222ad9d9af9f0c268df6afd911d8c439a84429cd098f1f23" exitCode=0
Nov 25 09:23:54 crc kubenswrapper[4687]: I1125 09:23:54.268286 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2b8071e8-54ac-4251-9604-d0dd6da182e2","Type":"ContainerDied","Data":"f0beec2d869c8e34222ad9d9af9f0c268df6afd911d8c439a84429cd098f1f23"}
Nov 25 09:23:54 crc kubenswrapper[4687]: I1125 09:23:54.269471 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-4q4h4" event={"ID":"b62e6ed0-d60b-46ea-b90e-a299f8b980cc","Type":"ContainerDied","Data":"63b81874f33ee900b0d99035e2a18ca597f92592576d2968be68ce9e9d162e2a"}
Nov 25 09:23:54 crc kubenswrapper[4687]: I1125 09:23:54.269596 4687 scope.go:117] "RemoveContainer" containerID="63afb34fc959015c64d47b479eee835214c16ad75d3007efb2b744e6936d461d"
Nov 25 09:23:54 crc kubenswrapper[4687]: I1125 09:23:54.269534 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-4q4h4"
Nov 25 09:23:54 crc kubenswrapper[4687]: I1125 09:23:54.310074 4687 scope.go:117] "RemoveContainer" containerID="9a8c18c38bea044113284adf7d183b3031ec16a48097337185a937b5f8d679a4"
Nov 25 09:23:54 crc kubenswrapper[4687]: I1125 09:23:54.316022 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-4q4h4"]
Nov 25 09:23:54 crc kubenswrapper[4687]: I1125 09:23:54.329139 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-4q4h4"]
Nov 25 09:23:55 crc kubenswrapper[4687]: I1125 09:23:55.082776 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6fd7f79f6b-n9xqk"
Nov 25 09:23:55 crc kubenswrapper[4687]: I1125 09:23:55.089971 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6fd7f79f6b-n9xqk"
Nov 25 09:23:55 crc kubenswrapper[4687]: I1125 09:23:55.189169 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-74498bff74-qgdp6"]
Nov 25 09:23:55 crc kubenswrapper[4687]: I1125 09:23:55.189658 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-74498bff74-qgdp6" podUID="050fcce2-c1d7-49d2-873a-0e85d090174e" containerName="barbican-api-log" containerID="cri-o://7af453fd2c6bb77c76abb18d66e2f3153c14f62ba832ed5d5b861623552843a0" gracePeriod=30
Nov 25 09:23:55 crc kubenswrapper[4687]: I1125 09:23:55.189885 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-74498bff74-qgdp6" podUID="050fcce2-c1d7-49d2-873a-0e85d090174e" containerName="barbican-api" containerID="cri-o://ded9129e832a16d66d96458bfc1db8f7a6c1656bb9afa55b5568aa70b752fd1f" gracePeriod=30
Nov 25 09:23:55 crc kubenswrapper[4687]: I1125 09:23:55.341129 4687 generic.go:334] "Generic (PLEG): container finished" podID="ea89f490-5a54-46eb-9b1c-1eb96dd181da" containerID="de6dd6a7ef780dbd15e6c80b510b13d7a645a96dcdbc08bfde45bbfa30791aeb" exitCode=0
Nov 25 09:23:55 crc kubenswrapper[4687]: I1125 09:23:55.342118 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7dc8446cb-d6wz7" event={"ID":"ea89f490-5a54-46eb-9b1c-1eb96dd181da","Type":"ContainerDied","Data":"de6dd6a7ef780dbd15e6c80b510b13d7a645a96dcdbc08bfde45bbfa30791aeb"}
Nov 25 09:23:55 crc kubenswrapper[4687]: I1125 09:23:55.754027 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b62e6ed0-d60b-46ea-b90e-a299f8b980cc" path="/var/lib/kubelet/pods/b62e6ed0-d60b-46ea-b90e-a299f8b980cc/volumes"
Nov 25 09:23:55 crc kubenswrapper[4687]: E1125 09:23:55.927961 4687 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b8071e8_54ac_4251_9604_d0dd6da182e2.slice/crio-2b4765c45fc7aab56d824a7cba4a97df31de41b6c1ca1235ce2ad2d566fbdcc5.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b8071e8_54ac_4251_9604_d0dd6da182e2.slice/crio-conmon-2b4765c45fc7aab56d824a7cba4a97df31de41b6c1ca1235ce2ad2d566fbdcc5.scope\": RecentStats: unable to find data in memory cache]"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.320378 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.367163 4687 generic.go:334] "Generic (PLEG): container finished" podID="2b8071e8-54ac-4251-9604-d0dd6da182e2" containerID="2b4765c45fc7aab56d824a7cba4a97df31de41b6c1ca1235ce2ad2d566fbdcc5" exitCode=0
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.367239 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2b8071e8-54ac-4251-9604-d0dd6da182e2","Type":"ContainerDied","Data":"2b4765c45fc7aab56d824a7cba4a97df31de41b6c1ca1235ce2ad2d566fbdcc5"}
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.367265 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2b8071e8-54ac-4251-9604-d0dd6da182e2","Type":"ContainerDied","Data":"cf834602fdba5ac2ae2ab02a9982e99656cbd428dd4d568e81ea79791f797ad7"}
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.367282 4687 scope.go:117] "RemoveContainer" containerID="f0beec2d869c8e34222ad9d9af9f0c268df6afd911d8c439a84429cd098f1f23"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.367464 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.379329 4687 generic.go:334] "Generic (PLEG): container finished" podID="050fcce2-c1d7-49d2-873a-0e85d090174e" containerID="7af453fd2c6bb77c76abb18d66e2f3153c14f62ba832ed5d5b861623552843a0" exitCode=143
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.379386 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-74498bff74-qgdp6" event={"ID":"050fcce2-c1d7-49d2-873a-0e85d090174e","Type":"ContainerDied","Data":"7af453fd2c6bb77c76abb18d66e2f3153c14f62ba832ed5d5b861623552843a0"}
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.393974 4687 scope.go:117] "RemoveContainer" containerID="2b4765c45fc7aab56d824a7cba4a97df31de41b6c1ca1235ce2ad2d566fbdcc5"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.421468 4687 scope.go:117] "RemoveContainer" containerID="f0beec2d869c8e34222ad9d9af9f0c268df6afd911d8c439a84429cd098f1f23"
Nov 25 09:23:56 crc kubenswrapper[4687]: E1125 09:23:56.422077 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f0beec2d869c8e34222ad9d9af9f0c268df6afd911d8c439a84429cd098f1f23\": container with ID starting with f0beec2d869c8e34222ad9d9af9f0c268df6afd911d8c439a84429cd098f1f23 not found: ID does not exist" containerID="f0beec2d869c8e34222ad9d9af9f0c268df6afd911d8c439a84429cd098f1f23"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.422131 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0beec2d869c8e34222ad9d9af9f0c268df6afd911d8c439a84429cd098f1f23"} err="failed to get container status \"f0beec2d869c8e34222ad9d9af9f0c268df6afd911d8c439a84429cd098f1f23\": rpc error: code = NotFound desc = could not find container \"f0beec2d869c8e34222ad9d9af9f0c268df6afd911d8c439a84429cd098f1f23\": container with ID starting with f0beec2d869c8e34222ad9d9af9f0c268df6afd911d8c439a84429cd098f1f23 not found: ID does not exist"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.422165 4687 scope.go:117] "RemoveContainer" containerID="2b4765c45fc7aab56d824a7cba4a97df31de41b6c1ca1235ce2ad2d566fbdcc5"
Nov 25 09:23:56 crc kubenswrapper[4687]: E1125 09:23:56.422747 4687 log.go:32] "ContainerStatus from runtime
service failed" err="rpc error: code = NotFound desc = could not find container \"2b4765c45fc7aab56d824a7cba4a97df31de41b6c1ca1235ce2ad2d566fbdcc5\": container with ID starting with 2b4765c45fc7aab56d824a7cba4a97df31de41b6c1ca1235ce2ad2d566fbdcc5 not found: ID does not exist" containerID="2b4765c45fc7aab56d824a7cba4a97df31de41b6c1ca1235ce2ad2d566fbdcc5" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.422774 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b4765c45fc7aab56d824a7cba4a97df31de41b6c1ca1235ce2ad2d566fbdcc5"} err="failed to get container status \"2b4765c45fc7aab56d824a7cba4a97df31de41b6c1ca1235ce2ad2d566fbdcc5\": rpc error: code = NotFound desc = could not find container \"2b4765c45fc7aab56d824a7cba4a97df31de41b6c1ca1235ce2ad2d566fbdcc5\": container with ID starting with 2b4765c45fc7aab56d824a7cba4a97df31de41b6c1ca1235ce2ad2d566fbdcc5 not found: ID does not exist" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.456056 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-combined-ca-bundle\") pod \"2b8071e8-54ac-4251-9604-d0dd6da182e2\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.456096 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-config-data-custom\") pod \"2b8071e8-54ac-4251-9604-d0dd6da182e2\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.456172 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2b8071e8-54ac-4251-9604-d0dd6da182e2-etc-machine-id\") pod \"2b8071e8-54ac-4251-9604-d0dd6da182e2\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.456198 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-scripts\") pod \"2b8071e8-54ac-4251-9604-d0dd6da182e2\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.456294 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-config-data\") pod \"2b8071e8-54ac-4251-9604-d0dd6da182e2\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.456401 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9flbv\" (UniqueName: \"kubernetes.io/projected/2b8071e8-54ac-4251-9604-d0dd6da182e2-kube-api-access-9flbv\") pod \"2b8071e8-54ac-4251-9604-d0dd6da182e2\" (UID: \"2b8071e8-54ac-4251-9604-d0dd6da182e2\") " Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.457562 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2b8071e8-54ac-4251-9604-d0dd6da182e2-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "2b8071e8-54ac-4251-9604-d0dd6da182e2" (UID: "2b8071e8-54ac-4251-9604-d0dd6da182e2"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.470965 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-scripts" (OuterVolumeSpecName: "scripts") pod "2b8071e8-54ac-4251-9604-d0dd6da182e2" (UID: "2b8071e8-54ac-4251-9604-d0dd6da182e2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.471221 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b8071e8-54ac-4251-9604-d0dd6da182e2-kube-api-access-9flbv" (OuterVolumeSpecName: "kube-api-access-9flbv") pod "2b8071e8-54ac-4251-9604-d0dd6da182e2" (UID: "2b8071e8-54ac-4251-9604-d0dd6da182e2"). InnerVolumeSpecName "kube-api-access-9flbv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.473886 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "2b8071e8-54ac-4251-9604-d0dd6da182e2" (UID: "2b8071e8-54ac-4251-9604-d0dd6da182e2"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.524671 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2b8071e8-54ac-4251-9604-d0dd6da182e2" (UID: "2b8071e8-54ac-4251-9604-d0dd6da182e2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.559986 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.560053 4687 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2b8071e8-54ac-4251-9604-d0dd6da182e2-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.560083 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9flbv\" (UniqueName: \"kubernetes.io/projected/2b8071e8-54ac-4251-9604-d0dd6da182e2-kube-api-access-9flbv\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.560108 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.560132 4687 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.579823 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-config-data" (OuterVolumeSpecName: "config-data") pod "2b8071e8-54ac-4251-9604-d0dd6da182e2" (UID: "2b8071e8-54ac-4251-9604-d0dd6da182e2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.661716 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b8071e8-54ac-4251-9604-d0dd6da182e2-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.707169 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.722672 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.730191 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:23:56 crc kubenswrapper[4687]: E1125 09:23:56.730720 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b8071e8-54ac-4251-9604-d0dd6da182e2" containerName="cinder-scheduler" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.730743 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b8071e8-54ac-4251-9604-d0dd6da182e2" containerName="cinder-scheduler" Nov 25 09:23:56 crc kubenswrapper[4687]: E1125 09:23:56.730771 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b62e6ed0-d60b-46ea-b90e-a299f8b980cc" containerName="dnsmasq-dns" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.730783 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="b62e6ed0-d60b-46ea-b90e-a299f8b980cc" containerName="dnsmasq-dns" Nov 25 09:23:56 crc kubenswrapper[4687]: E1125 09:23:56.730802 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b8071e8-54ac-4251-9604-d0dd6da182e2" containerName="probe" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.730810 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b8071e8-54ac-4251-9604-d0dd6da182e2" containerName="probe" Nov 25 09:23:56 crc kubenswrapper[4687]: E1125 09:23:56.730839 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b62e6ed0-d60b-46ea-b90e-a299f8b980cc" containerName="init" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.730846 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="b62e6ed0-d60b-46ea-b90e-a299f8b980cc" containerName="init" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.731045 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="b62e6ed0-d60b-46ea-b90e-a299f8b980cc" containerName="dnsmasq-dns" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.731065 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b8071e8-54ac-4251-9604-d0dd6da182e2" containerName="cinder-scheduler" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.731079 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b8071e8-54ac-4251-9604-d0dd6da182e2" containerName="probe" Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.732232 4687 util.go:30] "No sandbox for pod can be found. 
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.732232 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.734299 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.738836 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.865615 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zl4j\" (UniqueName: \"kubernetes.io/projected/48978802-f2a2-41ad-bc63-e71b66b0747f-kube-api-access-4zl4j\") pod \"cinder-scheduler-0\" (UID: \"48978802-f2a2-41ad-bc63-e71b66b0747f\") " pod="openstack/cinder-scheduler-0"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.866126 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/48978802-f2a2-41ad-bc63-e71b66b0747f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"48978802-f2a2-41ad-bc63-e71b66b0747f\") " pod="openstack/cinder-scheduler-0"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.866165 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/48978802-f2a2-41ad-bc63-e71b66b0747f-scripts\") pod \"cinder-scheduler-0\" (UID: \"48978802-f2a2-41ad-bc63-e71b66b0747f\") " pod="openstack/cinder-scheduler-0"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.866211 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48978802-f2a2-41ad-bc63-e71b66b0747f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"48978802-f2a2-41ad-bc63-e71b66b0747f\") " pod="openstack/cinder-scheduler-0"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.866298 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/48978802-f2a2-41ad-bc63-e71b66b0747f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"48978802-f2a2-41ad-bc63-e71b66b0747f\") " pod="openstack/cinder-scheduler-0"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.866933 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48978802-f2a2-41ad-bc63-e71b66b0747f-config-data\") pod \"cinder-scheduler-0\" (UID: \"48978802-f2a2-41ad-bc63-e71b66b0747f\") " pod="openstack/cinder-scheduler-0"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.968883 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48978802-f2a2-41ad-bc63-e71b66b0747f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"48978802-f2a2-41ad-bc63-e71b66b0747f\") " pod="openstack/cinder-scheduler-0"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.968978 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/48978802-f2a2-41ad-bc63-e71b66b0747f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"48978802-f2a2-41ad-bc63-e71b66b0747f\") " pod="openstack/cinder-scheduler-0"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.969034 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48978802-f2a2-41ad-bc63-e71b66b0747f-config-data\") pod \"cinder-scheduler-0\" (UID: \"48978802-f2a2-41ad-bc63-e71b66b0747f\") " pod="openstack/cinder-scheduler-0"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.969062 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zl4j\" (UniqueName: \"kubernetes.io/projected/48978802-f2a2-41ad-bc63-e71b66b0747f-kube-api-access-4zl4j\") pod \"cinder-scheduler-0\" (UID: \"48978802-f2a2-41ad-bc63-e71b66b0747f\") " pod="openstack/cinder-scheduler-0"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.969134 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/48978802-f2a2-41ad-bc63-e71b66b0747f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"48978802-f2a2-41ad-bc63-e71b66b0747f\") " pod="openstack/cinder-scheduler-0"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.969134 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/48978802-f2a2-41ad-bc63-e71b66b0747f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"48978802-f2a2-41ad-bc63-e71b66b0747f\") " pod="openstack/cinder-scheduler-0"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.969162 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/48978802-f2a2-41ad-bc63-e71b66b0747f-scripts\") pod \"cinder-scheduler-0\" (UID: \"48978802-f2a2-41ad-bc63-e71b66b0747f\") " pod="openstack/cinder-scheduler-0"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.972432 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/48978802-f2a2-41ad-bc63-e71b66b0747f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"48978802-f2a2-41ad-bc63-e71b66b0747f\") " pod="openstack/cinder-scheduler-0"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.972599 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/48978802-f2a2-41ad-bc63-e71b66b0747f-scripts\") pod \"cinder-scheduler-0\" (UID: \"48978802-f2a2-41ad-bc63-e71b66b0747f\") " pod="openstack/cinder-scheduler-0"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.980473 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48978802-f2a2-41ad-bc63-e71b66b0747f-config-data\") pod \"cinder-scheduler-0\" (UID: \"48978802-f2a2-41ad-bc63-e71b66b0747f\") " pod="openstack/cinder-scheduler-0"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.983668 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48978802-f2a2-41ad-bc63-e71b66b0747f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"48978802-f2a2-41ad-bc63-e71b66b0747f\") " pod="openstack/cinder-scheduler-0"
Nov 25 09:23:56 crc kubenswrapper[4687]: I1125 09:23:56.984995 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zl4j\" (UniqueName: \"kubernetes.io/projected/48978802-f2a2-41ad-bc63-e71b66b0747f-kube-api-access-4zl4j\") pod \"cinder-scheduler-0\" (UID: \"48978802-f2a2-41ad-bc63-e71b66b0747f\") " pod="openstack/cinder-scheduler-0"
Nov 25 09:23:57 crc kubenswrapper[4687]: I1125 09:23:57.055526 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 25 09:23:57 crc kubenswrapper[4687]: I1125 09:23:57.338376 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-79bbc45dbd-qv76b"
Nov 25 09:23:57 crc kubenswrapper[4687]: I1125 09:23:57.382367 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-7dc8446cb-d6wz7" podUID="ea89f490-5a54-46eb-9b1c-1eb96dd181da" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.146:8443: connect: connection refused"
Nov 25 09:23:57 crc kubenswrapper[4687]: I1125 09:23:57.473460 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 25 09:23:57 crc kubenswrapper[4687]: I1125 09:23:57.746476 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b8071e8-54ac-4251-9604-d0dd6da182e2" path="/var/lib/kubelet/pods/2b8071e8-54ac-4251-9604-d0dd6da182e2/volumes"
Nov 25 09:23:58 crc kubenswrapper[4687]: I1125 09:23:58.410350 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"48978802-f2a2-41ad-bc63-e71b66b0747f","Type":"ContainerStarted","Data":"bc4560e2e2a76b75d354fc2478859357288dd9d876850db64de95be1ae5fd235"}
Nov 25 09:23:58 crc kubenswrapper[4687]: I1125 09:23:58.410673 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"48978802-f2a2-41ad-bc63-e71b66b0747f","Type":"ContainerStarted","Data":"15008445180bbaf871e57cbf209a63b744d446d5bf8fceac46a0d79c0581e298"}
Nov 25 09:23:58 crc kubenswrapper[4687]: I1125 09:23:58.419524 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-74498bff74-qgdp6" podUID="050fcce2-c1d7-49d2-873a-0e85d090174e" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.159:9311/healthcheck\": read tcp 10.217.0.2:47240->10.217.0.159:9311: read: connection reset by peer"
Nov 25 09:23:58 crc kubenswrapper[4687]: I1125 09:23:58.419574 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-74498bff74-qgdp6" podUID="050fcce2-c1d7-49d2-873a-0e85d090174e" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.159:9311/healthcheck\": read tcp 10.217.0.2:47252->10.217.0.159:9311: read: connection reset by peer"
Nov 25 09:23:58 crc kubenswrapper[4687]: I1125 09:23:58.979620 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-74498bff74-qgdp6"
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.120640 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/050fcce2-c1d7-49d2-873a-0e85d090174e-combined-ca-bundle\") pod \"050fcce2-c1d7-49d2-873a-0e85d090174e\" (UID: \"050fcce2-c1d7-49d2-873a-0e85d090174e\") "
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.120745 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/050fcce2-c1d7-49d2-873a-0e85d090174e-config-data\") pod \"050fcce2-c1d7-49d2-873a-0e85d090174e\" (UID: \"050fcce2-c1d7-49d2-873a-0e85d090174e\") "
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.120890 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/050fcce2-c1d7-49d2-873a-0e85d090174e-logs\") pod \"050fcce2-c1d7-49d2-873a-0e85d090174e\" (UID: \"050fcce2-c1d7-49d2-873a-0e85d090174e\") "
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.120925 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/050fcce2-c1d7-49d2-873a-0e85d090174e-config-data-custom\") pod \"050fcce2-c1d7-49d2-873a-0e85d090174e\" (UID: \"050fcce2-c1d7-49d2-873a-0e85d090174e\") "
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.120949 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p2ssp\" (UniqueName: \"kubernetes.io/projected/050fcce2-c1d7-49d2-873a-0e85d090174e-kube-api-access-p2ssp\") pod \"050fcce2-c1d7-49d2-873a-0e85d090174e\" (UID: \"050fcce2-c1d7-49d2-873a-0e85d090174e\") "
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.123400 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/050fcce2-c1d7-49d2-873a-0e85d090174e-logs" (OuterVolumeSpecName: "logs") pod "050fcce2-c1d7-49d2-873a-0e85d090174e" (UID: "050fcce2-c1d7-49d2-873a-0e85d090174e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.128400 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/050fcce2-c1d7-49d2-873a-0e85d090174e-kube-api-access-p2ssp" (OuterVolumeSpecName: "kube-api-access-p2ssp") pod "050fcce2-c1d7-49d2-873a-0e85d090174e" (UID: "050fcce2-c1d7-49d2-873a-0e85d090174e"). InnerVolumeSpecName "kube-api-access-p2ssp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.128679 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/050fcce2-c1d7-49d2-873a-0e85d090174e-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "050fcce2-c1d7-49d2-873a-0e85d090174e" (UID: "050fcce2-c1d7-49d2-873a-0e85d090174e"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.156988 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/050fcce2-c1d7-49d2-873a-0e85d090174e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "050fcce2-c1d7-49d2-873a-0e85d090174e" (UID: "050fcce2-c1d7-49d2-873a-0e85d090174e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.179960 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/050fcce2-c1d7-49d2-873a-0e85d090174e-config-data" (OuterVolumeSpecName: "config-data") pod "050fcce2-c1d7-49d2-873a-0e85d090174e" (UID: "050fcce2-c1d7-49d2-873a-0e85d090174e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.223565 4687 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/050fcce2-c1d7-49d2-873a-0e85d090174e-logs\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.223600 4687 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/050fcce2-c1d7-49d2-873a-0e85d090174e-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.223614 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p2ssp\" (UniqueName: \"kubernetes.io/projected/050fcce2-c1d7-49d2-873a-0e85d090174e-kube-api-access-p2ssp\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.223627 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/050fcce2-c1d7-49d2-873a-0e85d090174e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.223641 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/050fcce2-c1d7-49d2-873a-0e85d090174e-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.335628 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5bf794b984-bbcp5"
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.353842 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5bf794b984-bbcp5"
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.418440 4687 generic.go:334] "Generic (PLEG): container finished" podID="050fcce2-c1d7-49d2-873a-0e85d090174e" containerID="ded9129e832a16d66d96458bfc1db8f7a6c1656bb9afa55b5568aa70b752fd1f" exitCode=0
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.418493 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-74498bff74-qgdp6" event={"ID":"050fcce2-c1d7-49d2-873a-0e85d090174e","Type":"ContainerDied","Data":"ded9129e832a16d66d96458bfc1db8f7a6c1656bb9afa55b5568aa70b752fd1f"}
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.418574 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-74498bff74-qgdp6" event={"ID":"050fcce2-c1d7-49d2-873a-0e85d090174e","Type":"ContainerDied","Data":"d08b2b93a7a33cb12997aca78458a014be535d871d5d23464c45ba8ec63d5c56"}
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.418590 4687 scope.go:117] "RemoveContainer" containerID="ded9129e832a16d66d96458bfc1db8f7a6c1656bb9afa55b5568aa70b752fd1f"
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.418695 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-74498bff74-qgdp6"
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.433653 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"48978802-f2a2-41ad-bc63-e71b66b0747f","Type":"ContainerStarted","Data":"0804347def74230dbd1f5dbee4b83992efd0d8ced7e33b4161bd64821eaa0463"}
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.449305 4687 scope.go:117] "RemoveContainer" containerID="7af453fd2c6bb77c76abb18d66e2f3153c14f62ba832ed5d5b861623552843a0"
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.473310 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.473288132 podStartE2EDuration="3.473288132s" podCreationTimestamp="2025-11-25 09:23:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:23:59.455205129 +0000 UTC m=+1234.508844847" watchObservedRunningTime="2025-11-25 09:23:59.473288132 +0000 UTC m=+1234.526927850"
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.483992 4687 scope.go:117] "RemoveContainer" containerID="ded9129e832a16d66d96458bfc1db8f7a6c1656bb9afa55b5568aa70b752fd1f"
Nov 25 09:23:59 crc kubenswrapper[4687]: E1125 09:23:59.484813 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ded9129e832a16d66d96458bfc1db8f7a6c1656bb9afa55b5568aa70b752fd1f\": container with ID starting with ded9129e832a16d66d96458bfc1db8f7a6c1656bb9afa55b5568aa70b752fd1f not found: ID does not exist" containerID="ded9129e832a16d66d96458bfc1db8f7a6c1656bb9afa55b5568aa70b752fd1f"
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.484849 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ded9129e832a16d66d96458bfc1db8f7a6c1656bb9afa55b5568aa70b752fd1f"} err="failed to get container status \"ded9129e832a16d66d96458bfc1db8f7a6c1656bb9afa55b5568aa70b752fd1f\": rpc error: code = NotFound desc = could not find container \"ded9129e832a16d66d96458bfc1db8f7a6c1656bb9afa55b5568aa70b752fd1f\": container with ID starting with ded9129e832a16d66d96458bfc1db8f7a6c1656bb9afa55b5568aa70b752fd1f not found: ID does not exist"
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.484876 4687 scope.go:117] "RemoveContainer" containerID="7af453fd2c6bb77c76abb18d66e2f3153c14f62ba832ed5d5b861623552843a0"
Nov 25 09:23:59 crc kubenswrapper[4687]: E1125 09:23:59.490642 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7af453fd2c6bb77c76abb18d66e2f3153c14f62ba832ed5d5b861623552843a0\": container with ID starting with 7af453fd2c6bb77c76abb18d66e2f3153c14f62ba832ed5d5b861623552843a0 not found: ID does not exist" containerID="7af453fd2c6bb77c76abb18d66e2f3153c14f62ba832ed5d5b861623552843a0"
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.490700 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7af453fd2c6bb77c76abb18d66e2f3153c14f62ba832ed5d5b861623552843a0"} err="failed to get container status \"7af453fd2c6bb77c76abb18d66e2f3153c14f62ba832ed5d5b861623552843a0\": rpc error: code = NotFound desc = could not find container \"7af453fd2c6bb77c76abb18d66e2f3153c14f62ba832ed5d5b861623552843a0\": container with ID starting with 7af453fd2c6bb77c76abb18d66e2f3153c14f62ba832ed5d5b861623552843a0 not found: ID does not exist"
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.493815 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-74498bff74-qgdp6"]
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.512487 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-74498bff74-qgdp6"]
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.748757 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="050fcce2-c1d7-49d2-873a-0e85d090174e" path="/var/lib/kubelet/pods/050fcce2-c1d7-49d2-873a-0e85d090174e/volumes"
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.859104 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-6cd548ffc8-p78fk"
Nov 25 09:23:59 crc kubenswrapper[4687]: I1125 09:23:59.953880 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-fdc69b5cc-jz28l"
Nov 25 09:24:00 crc kubenswrapper[4687]: I1125 09:24:00.042532 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-79bbc45dbd-qv76b"]
Nov 25 09:24:00 crc kubenswrapper[4687]: I1125 09:24:00.042796 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-79bbc45dbd-qv76b" podUID="b919f6e7-2668-404e-b839-5173deb3824d" containerName="neutron-api" containerID="cri-o://e148295b4a89a444c8c3b5e19c84e8e0a5d50773e6a73501e15b66fa19075ebe" gracePeriod=30
Nov 25 09:24:00 crc kubenswrapper[4687]: I1125 09:24:00.043209 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-79bbc45dbd-qv76b" podUID="b919f6e7-2668-404e-b839-5173deb3824d" containerName="neutron-httpd" containerID="cri-o://a5e083bae82f5749220d50032def5547ed68d118cb6e6bdeb82e50de4f9e0de2" gracePeriod=30
Nov 25 09:24:00 crc kubenswrapper[4687]: I1125 09:24:00.443981 4687 generic.go:334] "Generic (PLEG): container finished" podID="b919f6e7-2668-404e-b839-5173deb3824d" containerID="a5e083bae82f5749220d50032def5547ed68d118cb6e6bdeb82e50de4f9e0de2" exitCode=0
Nov 25 09:24:00 crc kubenswrapper[4687]: I1125 09:24:00.444035 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-79bbc45dbd-qv76b" event={"ID":"b919f6e7-2668-404e-b839-5173deb3824d","Type":"ContainerDied","Data":"a5e083bae82f5749220d50032def5547ed68d118cb6e6bdeb82e50de4f9e0de2"}
Nov 25 09:24:02 crc kubenswrapper[4687]: I1125 09:24:02.056694 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Nov 25 09:24:02 crc kubenswrapper[4687]: I1125 09:24:02.717856 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.188274 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"]
Nov 25 09:24:03 crc kubenswrapper[4687]: E1125 09:24:03.189055 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="050fcce2-c1d7-49d2-873a-0e85d090174e" containerName="barbican-api-log"
Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.189073 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="050fcce2-c1d7-49d2-873a-0e85d090174e" containerName="barbican-api-log"
containerName="barbican-api" Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.189117 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="050fcce2-c1d7-49d2-873a-0e85d090174e" containerName="barbican-api" Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.189327 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="050fcce2-c1d7-49d2-873a-0e85d090174e" containerName="barbican-api" Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.189341 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="050fcce2-c1d7-49d2-873a-0e85d090174e" containerName="barbican-api-log" Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.189972 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.192181 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.192280 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.192991 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-4c65n" Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.200633 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.302879 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f5dea615-cb9c-48fc-a557-9d8fbac041ac-openstack-config-secret\") pod \"openstackclient\" (UID: \"f5dea615-cb9c-48fc-a557-9d8fbac041ac\") " pod="openstack/openstackclient" Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.303084 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mtpsw\" (UniqueName: \"kubernetes.io/projected/f5dea615-cb9c-48fc-a557-9d8fbac041ac-kube-api-access-mtpsw\") pod \"openstackclient\" (UID: \"f5dea615-cb9c-48fc-a557-9d8fbac041ac\") " pod="openstack/openstackclient" Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.303154 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f5dea615-cb9c-48fc-a557-9d8fbac041ac-openstack-config\") pod \"openstackclient\" (UID: \"f5dea615-cb9c-48fc-a557-9d8fbac041ac\") " pod="openstack/openstackclient" Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.303216 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5dea615-cb9c-48fc-a557-9d8fbac041ac-combined-ca-bundle\") pod \"openstackclient\" (UID: \"f5dea615-cb9c-48fc-a557-9d8fbac041ac\") " pod="openstack/openstackclient" Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.404938 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f5dea615-cb9c-48fc-a557-9d8fbac041ac-openstack-config-secret\") pod \"openstackclient\" (UID: \"f5dea615-cb9c-48fc-a557-9d8fbac041ac\") " pod="openstack/openstackclient" Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.405033 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-mtpsw\" (UniqueName: \"kubernetes.io/projected/f5dea615-cb9c-48fc-a557-9d8fbac041ac-kube-api-access-mtpsw\") pod \"openstackclient\" (UID: \"f5dea615-cb9c-48fc-a557-9d8fbac041ac\") " pod="openstack/openstackclient" Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.405071 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f5dea615-cb9c-48fc-a557-9d8fbac041ac-openstack-config\") pod \"openstackclient\" (UID: \"f5dea615-cb9c-48fc-a557-9d8fbac041ac\") " pod="openstack/openstackclient" Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.405094 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5dea615-cb9c-48fc-a557-9d8fbac041ac-combined-ca-bundle\") pod \"openstackclient\" (UID: \"f5dea615-cb9c-48fc-a557-9d8fbac041ac\") " pod="openstack/openstackclient" Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.405991 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f5dea615-cb9c-48fc-a557-9d8fbac041ac-openstack-config\") pod \"openstackclient\" (UID: \"f5dea615-cb9c-48fc-a557-9d8fbac041ac\") " pod="openstack/openstackclient" Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.420605 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f5dea615-cb9c-48fc-a557-9d8fbac041ac-openstack-config-secret\") pod \"openstackclient\" (UID: \"f5dea615-cb9c-48fc-a557-9d8fbac041ac\") " pod="openstack/openstackclient" Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.420754 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5dea615-cb9c-48fc-a557-9d8fbac041ac-combined-ca-bundle\") pod \"openstackclient\" (UID: \"f5dea615-cb9c-48fc-a557-9d8fbac041ac\") " pod="openstack/openstackclient" Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.428763 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mtpsw\" (UniqueName: \"kubernetes.io/projected/f5dea615-cb9c-48fc-a557-9d8fbac041ac-kube-api-access-mtpsw\") pod \"openstackclient\" (UID: \"f5dea615-cb9c-48fc-a557-9d8fbac041ac\") " pod="openstack/openstackclient" Nov 25 09:24:03 crc kubenswrapper[4687]: I1125 09:24:03.518903 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 25 09:24:04 crc kubenswrapper[4687]: W1125 09:24:04.034829 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5dea615_cb9c_48fc_a557_9d8fbac041ac.slice/crio-764d02bf6f8c918a09377d872df7a2553a1d04a9c518cd19ba4b17a7adfbc899 WatchSource:0}: Error finding container 764d02bf6f8c918a09377d872df7a2553a1d04a9c518cd19ba4b17a7adfbc899: Status 404 returned error can't find the container with id 764d02bf6f8c918a09377d872df7a2553a1d04a9c518cd19ba4b17a7adfbc899 Nov 25 09:24:04 crc kubenswrapper[4687]: I1125 09:24:04.035098 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 09:24:04 crc kubenswrapper[4687]: I1125 09:24:04.487462 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"f5dea615-cb9c-48fc-a557-9d8fbac041ac","Type":"ContainerStarted","Data":"764d02bf6f8c918a09377d872df7a2553a1d04a9c518cd19ba4b17a7adfbc899"} Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.270312 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-79bbc45dbd-qv76b" Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.340911 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-ovndb-tls-certs\") pod \"b919f6e7-2668-404e-b839-5173deb3824d\" (UID: \"b919f6e7-2668-404e-b839-5173deb3824d\") " Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.341088 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-httpd-config\") pod \"b919f6e7-2668-404e-b839-5173deb3824d\" (UID: \"b919f6e7-2668-404e-b839-5173deb3824d\") " Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.341109 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-76ph9\" (UniqueName: \"kubernetes.io/projected/b919f6e7-2668-404e-b839-5173deb3824d-kube-api-access-76ph9\") pod \"b919f6e7-2668-404e-b839-5173deb3824d\" (UID: \"b919f6e7-2668-404e-b839-5173deb3824d\") " Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.341127 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-combined-ca-bundle\") pod \"b919f6e7-2668-404e-b839-5173deb3824d\" (UID: \"b919f6e7-2668-404e-b839-5173deb3824d\") " Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.341177 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-config\") pod \"b919f6e7-2668-404e-b839-5173deb3824d\" (UID: \"b919f6e7-2668-404e-b839-5173deb3824d\") " Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.347920 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "b919f6e7-2668-404e-b839-5173deb3824d" (UID: "b919f6e7-2668-404e-b839-5173deb3824d"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.349850 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b919f6e7-2668-404e-b839-5173deb3824d-kube-api-access-76ph9" (OuterVolumeSpecName: "kube-api-access-76ph9") pod "b919f6e7-2668-404e-b839-5173deb3824d" (UID: "b919f6e7-2668-404e-b839-5173deb3824d"). InnerVolumeSpecName "kube-api-access-76ph9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.393534 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-config" (OuterVolumeSpecName: "config") pod "b919f6e7-2668-404e-b839-5173deb3824d" (UID: "b919f6e7-2668-404e-b839-5173deb3824d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.394440 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b919f6e7-2668-404e-b839-5173deb3824d" (UID: "b919f6e7-2668-404e-b839-5173deb3824d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.424737 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "b919f6e7-2668-404e-b839-5173deb3824d" (UID: "b919f6e7-2668-404e-b839-5173deb3824d"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.443565 4687 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.443617 4687 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.443628 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-76ph9\" (UniqueName: \"kubernetes.io/projected/b919f6e7-2668-404e-b839-5173deb3824d-kube-api-access-76ph9\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.443642 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.443652 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/b919f6e7-2668-404e-b839-5173deb3824d-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.499126 4687 generic.go:334] "Generic (PLEG): container finished" podID="b919f6e7-2668-404e-b839-5173deb3824d" containerID="e148295b4a89a444c8c3b5e19c84e8e0a5d50773e6a73501e15b66fa19075ebe" exitCode=0 Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.499165 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/neutron-79bbc45dbd-qv76b" event={"ID":"b919f6e7-2668-404e-b839-5173deb3824d","Type":"ContainerDied","Data":"e148295b4a89a444c8c3b5e19c84e8e0a5d50773e6a73501e15b66fa19075ebe"} Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.499190 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-79bbc45dbd-qv76b" event={"ID":"b919f6e7-2668-404e-b839-5173deb3824d","Type":"ContainerDied","Data":"6ed594cb7d51a09e95bfec0f4a285571f5700a688a94ae1590c6c40210994234"} Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.499205 4687 scope.go:117] "RemoveContainer" containerID="a5e083bae82f5749220d50032def5547ed68d118cb6e6bdeb82e50de4f9e0de2" Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.499310 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-79bbc45dbd-qv76b" Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.530324 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-79bbc45dbd-qv76b"] Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.533670 4687 scope.go:117] "RemoveContainer" containerID="e148295b4a89a444c8c3b5e19c84e8e0a5d50773e6a73501e15b66fa19075ebe" Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.546434 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-79bbc45dbd-qv76b"] Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.565052 4687 scope.go:117] "RemoveContainer" containerID="a5e083bae82f5749220d50032def5547ed68d118cb6e6bdeb82e50de4f9e0de2" Nov 25 09:24:05 crc kubenswrapper[4687]: E1125 09:24:05.565443 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5e083bae82f5749220d50032def5547ed68d118cb6e6bdeb82e50de4f9e0de2\": container with ID starting with a5e083bae82f5749220d50032def5547ed68d118cb6e6bdeb82e50de4f9e0de2 not found: ID does not exist" containerID="a5e083bae82f5749220d50032def5547ed68d118cb6e6bdeb82e50de4f9e0de2" Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.565475 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5e083bae82f5749220d50032def5547ed68d118cb6e6bdeb82e50de4f9e0de2"} err="failed to get container status \"a5e083bae82f5749220d50032def5547ed68d118cb6e6bdeb82e50de4f9e0de2\": rpc error: code = NotFound desc = could not find container \"a5e083bae82f5749220d50032def5547ed68d118cb6e6bdeb82e50de4f9e0de2\": container with ID starting with a5e083bae82f5749220d50032def5547ed68d118cb6e6bdeb82e50de4f9e0de2 not found: ID does not exist" Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.565516 4687 scope.go:117] "RemoveContainer" containerID="e148295b4a89a444c8c3b5e19c84e8e0a5d50773e6a73501e15b66fa19075ebe" Nov 25 09:24:05 crc kubenswrapper[4687]: E1125 09:24:05.565997 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e148295b4a89a444c8c3b5e19c84e8e0a5d50773e6a73501e15b66fa19075ebe\": container with ID starting with e148295b4a89a444c8c3b5e19c84e8e0a5d50773e6a73501e15b66fa19075ebe not found: ID does not exist" containerID="e148295b4a89a444c8c3b5e19c84e8e0a5d50773e6a73501e15b66fa19075ebe" Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.566021 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e148295b4a89a444c8c3b5e19c84e8e0a5d50773e6a73501e15b66fa19075ebe"} err="failed to get container status 
\"e148295b4a89a444c8c3b5e19c84e8e0a5d50773e6a73501e15b66fa19075ebe\": rpc error: code = NotFound desc = could not find container \"e148295b4a89a444c8c3b5e19c84e8e0a5d50773e6a73501e15b66fa19075ebe\": container with ID starting with e148295b4a89a444c8c3b5e19c84e8e0a5d50773e6a73501e15b66fa19075ebe not found: ID does not exist" Nov 25 09:24:05 crc kubenswrapper[4687]: I1125 09:24:05.748185 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b919f6e7-2668-404e-b839-5173deb3824d" path="/var/lib/kubelet/pods/b919f6e7-2668-404e-b839-5173deb3824d/volumes" Nov 25 09:24:07 crc kubenswrapper[4687]: I1125 09:24:07.298771 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 25 09:24:07 crc kubenswrapper[4687]: I1125 09:24:07.382701 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-7dc8446cb-d6wz7" podUID="ea89f490-5a54-46eb-9b1c-1eb96dd181da" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.146:8443: connect: connection refused" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.122706 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-6cc97f684c-lcsst"] Nov 25 09:24:08 crc kubenswrapper[4687]: E1125 09:24:08.123152 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b919f6e7-2668-404e-b839-5173deb3824d" containerName="neutron-httpd" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.123175 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="b919f6e7-2668-404e-b839-5173deb3824d" containerName="neutron-httpd" Nov 25 09:24:08 crc kubenswrapper[4687]: E1125 09:24:08.123209 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b919f6e7-2668-404e-b839-5173deb3824d" containerName="neutron-api" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.123219 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="b919f6e7-2668-404e-b839-5173deb3824d" containerName="neutron-api" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.123457 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="b919f6e7-2668-404e-b839-5173deb3824d" containerName="neutron-httpd" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.123480 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="b919f6e7-2668-404e-b839-5173deb3824d" containerName="neutron-api" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.124704 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.130818 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.130880 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.131189 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.139986 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6cc97f684c-lcsst"] Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.195658 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4a5addf-9956-45b0-b761-affcce71a048-internal-tls-certs\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.196052 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4a5addf-9956-45b0-b761-affcce71a048-run-httpd\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.196185 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4a5addf-9956-45b0-b761-affcce71a048-public-tls-certs\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.196323 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4a5addf-9956-45b0-b761-affcce71a048-log-httpd\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.196483 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4a5addf-9956-45b0-b761-affcce71a048-config-data\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.196720 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmzfn\" (UniqueName: \"kubernetes.io/projected/e4a5addf-9956-45b0-b761-affcce71a048-kube-api-access-hmzfn\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.196857 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e4a5addf-9956-45b0-b761-affcce71a048-etc-swift\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " 
pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.196954 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4a5addf-9956-45b0-b761-affcce71a048-combined-ca-bundle\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.299412 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmzfn\" (UniqueName: \"kubernetes.io/projected/e4a5addf-9956-45b0-b761-affcce71a048-kube-api-access-hmzfn\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.299498 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e4a5addf-9956-45b0-b761-affcce71a048-etc-swift\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.299563 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4a5addf-9956-45b0-b761-affcce71a048-combined-ca-bundle\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.299601 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4a5addf-9956-45b0-b761-affcce71a048-internal-tls-certs\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.299624 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4a5addf-9956-45b0-b761-affcce71a048-run-httpd\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.299661 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4a5addf-9956-45b0-b761-affcce71a048-public-tls-certs\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.299694 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4a5addf-9956-45b0-b761-affcce71a048-log-httpd\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.299744 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4a5addf-9956-45b0-b761-affcce71a048-config-data\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " 
pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.301841 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4a5addf-9956-45b0-b761-affcce71a048-run-httpd\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.301921 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4a5addf-9956-45b0-b761-affcce71a048-log-httpd\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.306209 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4a5addf-9956-45b0-b761-affcce71a048-public-tls-certs\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.309639 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e4a5addf-9956-45b0-b761-affcce71a048-etc-swift\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.310902 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4a5addf-9956-45b0-b761-affcce71a048-internal-tls-certs\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.312980 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4a5addf-9956-45b0-b761-affcce71a048-config-data\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.315148 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4a5addf-9956-45b0-b761-affcce71a048-combined-ca-bundle\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.319381 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmzfn\" (UniqueName: \"kubernetes.io/projected/e4a5addf-9956-45b0-b761-affcce71a048-kube-api-access-hmzfn\") pod \"swift-proxy-6cc97f684c-lcsst\" (UID: \"e4a5addf-9956-45b0-b761-affcce71a048\") " pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.471548 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:08 crc kubenswrapper[4687]: I1125 09:24:08.733812 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="09662da0-b802-43c3-9c8e-4c9e951bdd7f" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 09:24:09 crc kubenswrapper[4687]: I1125 09:24:09.544853 4687 generic.go:334] "Generic (PLEG): container finished" podID="09662da0-b802-43c3-9c8e-4c9e951bdd7f" containerID="3a81502e023fe181770985aa8eea339beaf86d4f47bfb8fbe3a59f5d7e83136d" exitCode=137 Nov 25 09:24:09 crc kubenswrapper[4687]: I1125 09:24:09.544901 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09662da0-b802-43c3-9c8e-4c9e951bdd7f","Type":"ContainerDied","Data":"3a81502e023fe181770985aa8eea339beaf86d4f47bfb8fbe3a59f5d7e83136d"} Nov 25 09:24:14 crc kubenswrapper[4687]: I1125 09:24:14.794660 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:24:14 crc kubenswrapper[4687]: I1125 09:24:14.796617 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="639af5c1-faaf-45e2-b75e-6031913ffdb9" containerName="glance-log" containerID="cri-o://48b59d8d8d563935a22d9a78a3ea38f4de261f20ec8276e35131232d4547777c" gracePeriod=30 Nov 25 09:24:14 crc kubenswrapper[4687]: I1125 09:24:14.797228 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="639af5c1-faaf-45e2-b75e-6031913ffdb9" containerName="glance-httpd" containerID="cri-o://37dcb870cd9cd737b8534e6422b914527b13739fb776e77cac1c81666563b1ff" gracePeriod=30 Nov 25 09:24:14 crc kubenswrapper[4687]: I1125 09:24:14.903646 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.049483 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-combined-ca-bundle\") pod \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.049922 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-scripts\") pod \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.049994 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09662da0-b802-43c3-9c8e-4c9e951bdd7f-run-httpd\") pod \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.050022 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5fgh5\" (UniqueName: \"kubernetes.io/projected/09662da0-b802-43c3-9c8e-4c9e951bdd7f-kube-api-access-5fgh5\") pod \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.050098 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-config-data\") pod \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.050158 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-sg-core-conf-yaml\") pod \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.050202 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09662da0-b802-43c3-9c8e-4c9e951bdd7f-log-httpd\") pod \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\" (UID: \"09662da0-b802-43c3-9c8e-4c9e951bdd7f\") " Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.050494 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09662da0-b802-43c3-9c8e-4c9e951bdd7f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "09662da0-b802-43c3-9c8e-4c9e951bdd7f" (UID: "09662da0-b802-43c3-9c8e-4c9e951bdd7f"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.050665 4687 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09662da0-b802-43c3-9c8e-4c9e951bdd7f-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.050769 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09662da0-b802-43c3-9c8e-4c9e951bdd7f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "09662da0-b802-43c3-9c8e-4c9e951bdd7f" (UID: "09662da0-b802-43c3-9c8e-4c9e951bdd7f"). 
InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.056339 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-scripts" (OuterVolumeSpecName: "scripts") pod "09662da0-b802-43c3-9c8e-4c9e951bdd7f" (UID: "09662da0-b802-43c3-9c8e-4c9e951bdd7f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.058562 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09662da0-b802-43c3-9c8e-4c9e951bdd7f-kube-api-access-5fgh5" (OuterVolumeSpecName: "kube-api-access-5fgh5") pod "09662da0-b802-43c3-9c8e-4c9e951bdd7f" (UID: "09662da0-b802-43c3-9c8e-4c9e951bdd7f"). InnerVolumeSpecName "kube-api-access-5fgh5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.099663 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "09662da0-b802-43c3-9c8e-4c9e951bdd7f" (UID: "09662da0-b802-43c3-9c8e-4c9e951bdd7f"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.109456 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "09662da0-b802-43c3-9c8e-4c9e951bdd7f" (UID: "09662da0-b802-43c3-9c8e-4c9e951bdd7f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:24:15 crc kubenswrapper[4687]: W1125 09:24:15.143164 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode4a5addf_9956_45b0_b761_affcce71a048.slice/crio-924ba44cec62bf62b601bc4771962eb291520039d351989a128d2d52d53e1fd7 WatchSource:0}: Error finding container 924ba44cec62bf62b601bc4771962eb291520039d351989a128d2d52d53e1fd7: Status 404 returned error can't find the container with id 924ba44cec62bf62b601bc4771962eb291520039d351989a128d2d52d53e1fd7 Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.144671 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6cc97f684c-lcsst"] Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.149391 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-config-data" (OuterVolumeSpecName: "config-data") pod "09662da0-b802-43c3-9c8e-4c9e951bdd7f" (UID: "09662da0-b802-43c3-9c8e-4c9e951bdd7f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.152296 4687 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.152326 4687 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09662da0-b802-43c3-9c8e-4c9e951bdd7f-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.152339 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.152349 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.152358 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5fgh5\" (UniqueName: \"kubernetes.io/projected/09662da0-b802-43c3-9c8e-4c9e951bdd7f-kube-api-access-5fgh5\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.152366 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09662da0-b802-43c3-9c8e-4c9e951bdd7f-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.502661 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.503169 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b" containerName="glance-log" containerID="cri-o://8e9dc2fe748b29eb8444030972d323442905c30915811bcabf006b1aa63fa220" gracePeriod=30 Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.503318 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b" containerName="glance-httpd" containerID="cri-o://bc8e5baa0138b8324cec2648c0681f3aedf3d15ce1d321f2e6bba60401d4e86a" gracePeriod=30 Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.615699 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6cc97f684c-lcsst" event={"ID":"e4a5addf-9956-45b0-b761-affcce71a048","Type":"ContainerStarted","Data":"61e03c96a1365b5031e60abac616beec69f08761fb749e7aa45c4847df87b549"} Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.615752 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6cc97f684c-lcsst" event={"ID":"e4a5addf-9956-45b0-b761-affcce71a048","Type":"ContainerStarted","Data":"83fbed284cb04d58c7aa30fb6b42198847f57835d589f7183eacb32bb5938d6d"} Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.615765 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6cc97f684c-lcsst" event={"ID":"e4a5addf-9956-45b0-b761-affcce71a048","Type":"ContainerStarted","Data":"924ba44cec62bf62b601bc4771962eb291520039d351989a128d2d52d53e1fd7"} Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.617092 4687 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.617128 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.638668 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09662da0-b802-43c3-9c8e-4c9e951bdd7f","Type":"ContainerDied","Data":"24dec1fee7ca90a481627e52c3764aad58ac8fdf62a3dead9cdbd91e5ac09180"} Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.638721 4687 scope.go:117] "RemoveContainer" containerID="3a81502e023fe181770985aa8eea339beaf86d4f47bfb8fbe3a59f5d7e83136d" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.638826 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.649359 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-6cc97f684c-lcsst" podStartSLOduration=7.649346779 podStartE2EDuration="7.649346779s" podCreationTimestamp="2025-11-25 09:24:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:24:15.646143171 +0000 UTC m=+1250.699782889" watchObservedRunningTime="2025-11-25 09:24:15.649346779 +0000 UTC m=+1250.702986497" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.666148 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"f5dea615-cb9c-48fc-a557-9d8fbac041ac","Type":"ContainerStarted","Data":"419d19c1c2eacc33370842496db55f197f2df4bb803ca037c69a86c4779fbe33"} Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.679622 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.052832449 podStartE2EDuration="12.679605694s" podCreationTimestamp="2025-11-25 09:24:03 +0000 UTC" firstStartedPulling="2025-11-25 09:24:04.038433917 +0000 UTC m=+1239.092073635" lastFinishedPulling="2025-11-25 09:24:14.665207162 +0000 UTC m=+1249.718846880" observedRunningTime="2025-11-25 09:24:15.677490847 +0000 UTC m=+1250.731130565" watchObservedRunningTime="2025-11-25 09:24:15.679605694 +0000 UTC m=+1250.733245412" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.687736 4687 generic.go:334] "Generic (PLEG): container finished" podID="639af5c1-faaf-45e2-b75e-6031913ffdb9" containerID="48b59d8d8d563935a22d9a78a3ea38f4de261f20ec8276e35131232d4547777c" exitCode=143 Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.687800 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"639af5c1-faaf-45e2-b75e-6031913ffdb9","Type":"ContainerDied","Data":"48b59d8d8d563935a22d9a78a3ea38f4de261f20ec8276e35131232d4547777c"} Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.696672 4687 scope.go:117] "RemoveContainer" containerID="55641debda0cfb9cd4ff16ce9abff92b66ae9a3061601c32b13947c28897ed00" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.729757 4687 scope.go:117] "RemoveContainer" containerID="4b85b4fc296a4ea78929c1a8a57a40cfa5ce2c00d86d703f758a339b8331c829" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.783277 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 
09:24:15.795997 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.803331 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:24:15 crc kubenswrapper[4687]: E1125 09:24:15.803776 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09662da0-b802-43c3-9c8e-4c9e951bdd7f" containerName="sg-core" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.803798 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="09662da0-b802-43c3-9c8e-4c9e951bdd7f" containerName="sg-core" Nov 25 09:24:15 crc kubenswrapper[4687]: E1125 09:24:15.803812 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09662da0-b802-43c3-9c8e-4c9e951bdd7f" containerName="proxy-httpd" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.803819 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="09662da0-b802-43c3-9c8e-4c9e951bdd7f" containerName="proxy-httpd" Nov 25 09:24:15 crc kubenswrapper[4687]: E1125 09:24:15.803870 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09662da0-b802-43c3-9c8e-4c9e951bdd7f" containerName="ceilometer-notification-agent" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.803882 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="09662da0-b802-43c3-9c8e-4c9e951bdd7f" containerName="ceilometer-notification-agent" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.804072 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="09662da0-b802-43c3-9c8e-4c9e951bdd7f" containerName="sg-core" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.804100 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="09662da0-b802-43c3-9c8e-4c9e951bdd7f" containerName="proxy-httpd" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.804113 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="09662da0-b802-43c3-9c8e-4c9e951bdd7f" containerName="ceilometer-notification-agent" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.805842 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.808410 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.809480 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.816495 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.971673 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-run-httpd\") pod \"ceilometer-0\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " pod="openstack/ceilometer-0" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.971734 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-log-httpd\") pod \"ceilometer-0\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " pod="openstack/ceilometer-0" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.971764 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " pod="openstack/ceilometer-0" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.971833 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " pod="openstack/ceilometer-0" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.971854 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-config-data\") pod \"ceilometer-0\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " pod="openstack/ceilometer-0" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.971876 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hgbf\" (UniqueName: \"kubernetes.io/projected/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-kube-api-access-5hgbf\") pod \"ceilometer-0\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " pod="openstack/ceilometer-0" Nov 25 09:24:15 crc kubenswrapper[4687]: I1125 09:24:15.971902 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-scripts\") pod \"ceilometer-0\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " pod="openstack/ceilometer-0" Nov 25 09:24:16 crc kubenswrapper[4687]: I1125 09:24:16.073161 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " pod="openstack/ceilometer-0" Nov 25 09:24:16 crc kubenswrapper[4687]: I1125 
09:24:16.073666 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-config-data\") pod \"ceilometer-0\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " pod="openstack/ceilometer-0" Nov 25 09:24:16 crc kubenswrapper[4687]: I1125 09:24:16.073707 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hgbf\" (UniqueName: \"kubernetes.io/projected/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-kube-api-access-5hgbf\") pod \"ceilometer-0\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " pod="openstack/ceilometer-0" Nov 25 09:24:16 crc kubenswrapper[4687]: I1125 09:24:16.073743 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-scripts\") pod \"ceilometer-0\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " pod="openstack/ceilometer-0" Nov 25 09:24:16 crc kubenswrapper[4687]: I1125 09:24:16.073800 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-run-httpd\") pod \"ceilometer-0\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " pod="openstack/ceilometer-0" Nov 25 09:24:16 crc kubenswrapper[4687]: I1125 09:24:16.073848 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-log-httpd\") pod \"ceilometer-0\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " pod="openstack/ceilometer-0" Nov 25 09:24:16 crc kubenswrapper[4687]: I1125 09:24:16.073884 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " pod="openstack/ceilometer-0" Nov 25 09:24:16 crc kubenswrapper[4687]: I1125 09:24:16.075452 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-run-httpd\") pod \"ceilometer-0\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " pod="openstack/ceilometer-0" Nov 25 09:24:16 crc kubenswrapper[4687]: I1125 09:24:16.075469 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-log-httpd\") pod \"ceilometer-0\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " pod="openstack/ceilometer-0" Nov 25 09:24:16 crc kubenswrapper[4687]: I1125 09:24:16.079157 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-scripts\") pod \"ceilometer-0\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " pod="openstack/ceilometer-0" Nov 25 09:24:16 crc kubenswrapper[4687]: I1125 09:24:16.079522 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-config-data\") pod \"ceilometer-0\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " pod="openstack/ceilometer-0" Nov 25 09:24:16 crc kubenswrapper[4687]: I1125 09:24:16.079850 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " pod="openstack/ceilometer-0" Nov 25 09:24:16 crc kubenswrapper[4687]: I1125 09:24:16.087570 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " pod="openstack/ceilometer-0" Nov 25 09:24:16 crc kubenswrapper[4687]: I1125 09:24:16.093992 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hgbf\" (UniqueName: \"kubernetes.io/projected/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-kube-api-access-5hgbf\") pod \"ceilometer-0\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " pod="openstack/ceilometer-0" Nov 25 09:24:16 crc kubenswrapper[4687]: I1125 09:24:16.132071 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:24:16 crc kubenswrapper[4687]: I1125 09:24:16.599381 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:24:16 crc kubenswrapper[4687]: I1125 09:24:16.641175 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:24:16 crc kubenswrapper[4687]: W1125 09:24:16.644701 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf3d562b_dfcc_4c3c_b5f8_ff7f3e284457.slice/crio-3395d24a6abf322703546fe6d39413c0b6478420c075346199c15fbfb57239c3 WatchSource:0}: Error finding container 3395d24a6abf322703546fe6d39413c0b6478420c075346199c15fbfb57239c3: Status 404 returned error can't find the container with id 3395d24a6abf322703546fe6d39413c0b6478420c075346199c15fbfb57239c3 Nov 25 09:24:16 crc kubenswrapper[4687]: I1125 09:24:16.721815 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457","Type":"ContainerStarted","Data":"3395d24a6abf322703546fe6d39413c0b6478420c075346199c15fbfb57239c3"} Nov 25 09:24:16 crc kubenswrapper[4687]: I1125 09:24:16.739052 4687 generic.go:334] "Generic (PLEG): container finished" podID="6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b" containerID="8e9dc2fe748b29eb8444030972d323442905c30915811bcabf006b1aa63fa220" exitCode=143 Nov 25 09:24:16 crc kubenswrapper[4687]: I1125 09:24:16.739602 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b","Type":"ContainerDied","Data":"8e9dc2fe748b29eb8444030972d323442905c30915811bcabf006b1aa63fa220"} Nov 25 09:24:17 crc kubenswrapper[4687]: I1125 09:24:17.383099 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-7dc8446cb-d6wz7" podUID="ea89f490-5a54-46eb-9b1c-1eb96dd181da" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.146:8443: connect: connection refused" Nov 25 09:24:17 crc kubenswrapper[4687]: I1125 09:24:17.383229 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:24:17 crc kubenswrapper[4687]: I1125 09:24:17.757400 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09662da0-b802-43c3-9c8e-4c9e951bdd7f" 
path="/var/lib/kubelet/pods/09662da0-b802-43c3-9c8e-4c9e951bdd7f/volumes" Nov 25 09:24:17 crc kubenswrapper[4687]: I1125 09:24:17.766553 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457","Type":"ContainerStarted","Data":"672bc54fd6e8d9355659ba7cc4dfb76c01599310bb6adf00d8db903072dce0c8"} Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.459147 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.524293 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6n5bv\" (UniqueName: \"kubernetes.io/projected/639af5c1-faaf-45e2-b75e-6031913ffdb9-kube-api-access-6n5bv\") pod \"639af5c1-faaf-45e2-b75e-6031913ffdb9\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.525006 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-combined-ca-bundle\") pod \"639af5c1-faaf-45e2-b75e-6031913ffdb9\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.525064 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-scripts\") pod \"639af5c1-faaf-45e2-b75e-6031913ffdb9\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.525108 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/639af5c1-faaf-45e2-b75e-6031913ffdb9-httpd-run\") pod \"639af5c1-faaf-45e2-b75e-6031913ffdb9\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.525152 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"639af5c1-faaf-45e2-b75e-6031913ffdb9\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.525180 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-public-tls-certs\") pod \"639af5c1-faaf-45e2-b75e-6031913ffdb9\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.525237 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-config-data\") pod \"639af5c1-faaf-45e2-b75e-6031913ffdb9\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.525345 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/639af5c1-faaf-45e2-b75e-6031913ffdb9-logs\") pod \"639af5c1-faaf-45e2-b75e-6031913ffdb9\" (UID: \"639af5c1-faaf-45e2-b75e-6031913ffdb9\") " Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.526719 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/639af5c1-faaf-45e2-b75e-6031913ffdb9-logs" (OuterVolumeSpecName: 
"logs") pod "639af5c1-faaf-45e2-b75e-6031913ffdb9" (UID: "639af5c1-faaf-45e2-b75e-6031913ffdb9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.528409 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/639af5c1-faaf-45e2-b75e-6031913ffdb9-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "639af5c1-faaf-45e2-b75e-6031913ffdb9" (UID: "639af5c1-faaf-45e2-b75e-6031913ffdb9"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.536711 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/639af5c1-faaf-45e2-b75e-6031913ffdb9-kube-api-access-6n5bv" (OuterVolumeSpecName: "kube-api-access-6n5bv") pod "639af5c1-faaf-45e2-b75e-6031913ffdb9" (UID: "639af5c1-faaf-45e2-b75e-6031913ffdb9"). InnerVolumeSpecName "kube-api-access-6n5bv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.543811 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-scripts" (OuterVolumeSpecName: "scripts") pod "639af5c1-faaf-45e2-b75e-6031913ffdb9" (UID: "639af5c1-faaf-45e2-b75e-6031913ffdb9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.555257 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "639af5c1-faaf-45e2-b75e-6031913ffdb9" (UID: "639af5c1-faaf-45e2-b75e-6031913ffdb9"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.610618 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "639af5c1-faaf-45e2-b75e-6031913ffdb9" (UID: "639af5c1-faaf-45e2-b75e-6031913ffdb9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.628083 4687 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/639af5c1-faaf-45e2-b75e-6031913ffdb9-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.628147 4687 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.628161 4687 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/639af5c1-faaf-45e2-b75e-6031913ffdb9-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.628172 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6n5bv\" (UniqueName: \"kubernetes.io/projected/639af5c1-faaf-45e2-b75e-6031913ffdb9-kube-api-access-6n5bv\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.628186 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.628196 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.652566 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-config-data" (OuterVolumeSpecName: "config-data") pod "639af5c1-faaf-45e2-b75e-6031913ffdb9" (UID: "639af5c1-faaf-45e2-b75e-6031913ffdb9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.659683 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "639af5c1-faaf-45e2-b75e-6031913ffdb9" (UID: "639af5c1-faaf-45e2-b75e-6031913ffdb9"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.669831 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-nnzf8"] Nov 25 09:24:18 crc kubenswrapper[4687]: E1125 09:24:18.670905 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="639af5c1-faaf-45e2-b75e-6031913ffdb9" containerName="glance-httpd" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.670929 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="639af5c1-faaf-45e2-b75e-6031913ffdb9" containerName="glance-httpd" Nov 25 09:24:18 crc kubenswrapper[4687]: E1125 09:24:18.670970 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="639af5c1-faaf-45e2-b75e-6031913ffdb9" containerName="glance-log" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.670978 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="639af5c1-faaf-45e2-b75e-6031913ffdb9" containerName="glance-log" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.671171 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="639af5c1-faaf-45e2-b75e-6031913ffdb9" containerName="glance-httpd" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.671197 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="639af5c1-faaf-45e2-b75e-6031913ffdb9" containerName="glance-log" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.672247 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-nnzf8" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.688701 4687 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.694550 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-nnzf8"] Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.730986 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f850ec56-21a5-4782-8aa3-2a1f53ddadb2-operator-scripts\") pod \"nova-api-db-create-nnzf8\" (UID: \"f850ec56-21a5-4782-8aa3-2a1f53ddadb2\") " pod="openstack/nova-api-db-create-nnzf8" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.731079 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2525\" (UniqueName: \"kubernetes.io/projected/f850ec56-21a5-4782-8aa3-2a1f53ddadb2-kube-api-access-h2525\") pod \"nova-api-db-create-nnzf8\" (UID: \"f850ec56-21a5-4782-8aa3-2a1f53ddadb2\") " pod="openstack/nova-api-db-create-nnzf8" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.731132 4687 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.731143 4687 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.731153 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/639af5c1-faaf-45e2-b75e-6031913ffdb9-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 
09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.733853 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.148:9292/healthcheck\": read tcp 10.217.0.2:43464->10.217.0.148:9292: read: connection reset by peer"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.734049 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.148:9292/healthcheck\": read tcp 10.217.0.2:43476->10.217.0.148:9292: read: connection reset by peer"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.743230 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-6bad-account-create-2pbld"]
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.750218 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-6bad-account-create-2pbld"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.752618 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.755823 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-6bad-account-create-2pbld"]
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.775187 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457","Type":"ContainerStarted","Data":"20c8d7c53e8c1243047227417a10d2abc6e193b912587212062828db3b95d181"}
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.780033 4687 generic.go:334] "Generic (PLEG): container finished" podID="639af5c1-faaf-45e2-b75e-6031913ffdb9" containerID="37dcb870cd9cd737b8534e6422b914527b13739fb776e77cac1c81666563b1ff" exitCode=0
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.780073 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"639af5c1-faaf-45e2-b75e-6031913ffdb9","Type":"ContainerDied","Data":"37dcb870cd9cd737b8534e6422b914527b13739fb776e77cac1c81666563b1ff"}
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.780100 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"639af5c1-faaf-45e2-b75e-6031913ffdb9","Type":"ContainerDied","Data":"8990c357f1ae29b2219db77c6c98c0ac63c0eb7b1bcb11e8c46ccc9edc4d4a74"}
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.780115 4687 scope.go:117] "RemoveContainer" containerID="37dcb870cd9cd737b8534e6422b914527b13739fb776e77cac1c81666563b1ff"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.780234 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.832832 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f850ec56-21a5-4782-8aa3-2a1f53ddadb2-operator-scripts\") pod \"nova-api-db-create-nnzf8\" (UID: \"f850ec56-21a5-4782-8aa3-2a1f53ddadb2\") " pod="openstack/nova-api-db-create-nnzf8"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.833631 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f850ec56-21a5-4782-8aa3-2a1f53ddadb2-operator-scripts\") pod \"nova-api-db-create-nnzf8\" (UID: \"f850ec56-21a5-4782-8aa3-2a1f53ddadb2\") " pod="openstack/nova-api-db-create-nnzf8"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.833821 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2525\" (UniqueName: \"kubernetes.io/projected/f850ec56-21a5-4782-8aa3-2a1f53ddadb2-kube-api-access-h2525\") pod \"nova-api-db-create-nnzf8\" (UID: \"f850ec56-21a5-4782-8aa3-2a1f53ddadb2\") " pod="openstack/nova-api-db-create-nnzf8"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.836738 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-ll6h7"]
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.838196 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-ll6h7"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.854772 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2525\" (UniqueName: \"kubernetes.io/projected/f850ec56-21a5-4782-8aa3-2a1f53ddadb2-kube-api-access-h2525\") pod \"nova-api-db-create-nnzf8\" (UID: \"f850ec56-21a5-4782-8aa3-2a1f53ddadb2\") " pod="openstack/nova-api-db-create-nnzf8"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.865270 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-ll6h7"]
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.880135 4687 scope.go:117] "RemoveContainer" containerID="48b59d8d8d563935a22d9a78a3ea38f4de261f20ec8276e35131232d4547777c"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.906437 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.921746 4687 scope.go:117] "RemoveContainer" containerID="37dcb870cd9cd737b8534e6422b914527b13739fb776e77cac1c81666563b1ff"
Nov 25 09:24:18 crc kubenswrapper[4687]: E1125 09:24:18.926675 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37dcb870cd9cd737b8534e6422b914527b13739fb776e77cac1c81666563b1ff\": container with ID starting with 37dcb870cd9cd737b8534e6422b914527b13739fb776e77cac1c81666563b1ff not found: ID does not exist" containerID="37dcb870cd9cd737b8534e6422b914527b13739fb776e77cac1c81666563b1ff"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.926871 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37dcb870cd9cd737b8534e6422b914527b13739fb776e77cac1c81666563b1ff"} err="failed to get container status \"37dcb870cd9cd737b8534e6422b914527b13739fb776e77cac1c81666563b1ff\": rpc error: code = NotFound desc = could not find container \"37dcb870cd9cd737b8534e6422b914527b13739fb776e77cac1c81666563b1ff\": container with ID starting with 37dcb870cd9cd737b8534e6422b914527b13739fb776e77cac1c81666563b1ff not found: ID does not exist"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.926977 4687 scope.go:117] "RemoveContainer" containerID="48b59d8d8d563935a22d9a78a3ea38f4de261f20ec8276e35131232d4547777c"
Nov 25 09:24:18 crc kubenswrapper[4687]: E1125 09:24:18.930643 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48b59d8d8d563935a22d9a78a3ea38f4de261f20ec8276e35131232d4547777c\": container with ID starting with 48b59d8d8d563935a22d9a78a3ea38f4de261f20ec8276e35131232d4547777c not found: ID does not exist" containerID="48b59d8d8d563935a22d9a78a3ea38f4de261f20ec8276e35131232d4547777c"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.930847 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48b59d8d8d563935a22d9a78a3ea38f4de261f20ec8276e35131232d4547777c"} err="failed to get container status \"48b59d8d8d563935a22d9a78a3ea38f4de261f20ec8276e35131232d4547777c\": rpc error: code = NotFound desc = could not find container \"48b59d8d8d563935a22d9a78a3ea38f4de261f20ec8276e35131232d4547777c\": container with ID starting with 48b59d8d8d563935a22d9a78a3ea38f4de261f20ec8276e35131232d4547777c not found: ID does not exist"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.936760 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.943175 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phgzd\" (UniqueName: \"kubernetes.io/projected/4261462d-4b8e-4739-b79f-882c109aa8be-kube-api-access-phgzd\") pod \"nova-api-6bad-account-create-2pbld\" (UID: \"4261462d-4b8e-4739-b79f-882c109aa8be\") " pod="openstack/nova-api-6bad-account-create-2pbld"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.944343 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4261462d-4b8e-4739-b79f-882c109aa8be-operator-scripts\") pod \"nova-api-6bad-account-create-2pbld\" (UID: \"4261462d-4b8e-4739-b79f-882c109aa8be\") " pod="openstack/nova-api-6bad-account-create-2pbld"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.944532 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lsqmz\" (UniqueName: \"kubernetes.io/projected/3d9195a1-3aeb-466d-970a-8abd134135c8-kube-api-access-lsqmz\") pod \"nova-cell0-db-create-ll6h7\" (UID: \"3d9195a1-3aeb-466d-970a-8abd134135c8\") " pod="openstack/nova-cell0-db-create-ll6h7"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.944936 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d9195a1-3aeb-466d-970a-8abd134135c8-operator-scripts\") pod \"nova-cell0-db-create-ll6h7\" (UID: \"3d9195a1-3aeb-466d-970a-8abd134135c8\") " pod="openstack/nova-cell0-db-create-ll6h7"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.976700 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.978083 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.981411 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.981697 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Nov 25 09:24:18 crc kubenswrapper[4687]: I1125 09:24:18.987448 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.007269 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-nnzf8"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.015542 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-4ndrh"]
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.016778 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-4ndrh"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.027210 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-e925-account-create-wkg4s"]
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.028662 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-e925-account-create-wkg4s"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.033124 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.041629 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-4ndrh"]
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.046599 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d9195a1-3aeb-466d-970a-8abd134135c8-operator-scripts\") pod \"nova-cell0-db-create-ll6h7\" (UID: \"3d9195a1-3aeb-466d-970a-8abd134135c8\") " pod="openstack/nova-cell0-db-create-ll6h7"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.046642 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phgzd\" (UniqueName: \"kubernetes.io/projected/4261462d-4b8e-4739-b79f-882c109aa8be-kube-api-access-phgzd\") pod \"nova-api-6bad-account-create-2pbld\" (UID: \"4261462d-4b8e-4739-b79f-882c109aa8be\") " pod="openstack/nova-api-6bad-account-create-2pbld"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.046676 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4261462d-4b8e-4739-b79f-882c109aa8be-operator-scripts\") pod \"nova-api-6bad-account-create-2pbld\" (UID: \"4261462d-4b8e-4739-b79f-882c109aa8be\") " pod="openstack/nova-api-6bad-account-create-2pbld"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.046718 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lsqmz\" (UniqueName: \"kubernetes.io/projected/3d9195a1-3aeb-466d-970a-8abd134135c8-kube-api-access-lsqmz\") pod \"nova-cell0-db-create-ll6h7\" (UID: \"3d9195a1-3aeb-466d-970a-8abd134135c8\") " pod="openstack/nova-cell0-db-create-ll6h7"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.046763 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2fzb\" (UniqueName: \"kubernetes.io/projected/1db82525-47aa-465b-b638-0d954b02e9b2-kube-api-access-v2fzb\") pod \"nova-cell0-e925-account-create-wkg4s\" (UID: \"1db82525-47aa-465b-b638-0d954b02e9b2\") " pod="openstack/nova-cell0-e925-account-create-wkg4s"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.046826 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1db82525-47aa-465b-b638-0d954b02e9b2-operator-scripts\") pod \"nova-cell0-e925-account-create-wkg4s\" (UID: \"1db82525-47aa-465b-b638-0d954b02e9b2\") " pod="openstack/nova-cell0-e925-account-create-wkg4s"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.048166 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4261462d-4b8e-4739-b79f-882c109aa8be-operator-scripts\") pod \"nova-api-6bad-account-create-2pbld\" (UID: \"4261462d-4b8e-4739-b79f-882c109aa8be\") " pod="openstack/nova-api-6bad-account-create-2pbld"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.053522 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-e925-account-create-wkg4s"]
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.054987 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d9195a1-3aeb-466d-970a-8abd134135c8-operator-scripts\") pod \"nova-cell0-db-create-ll6h7\" (UID: \"3d9195a1-3aeb-466d-970a-8abd134135c8\") " pod="openstack/nova-cell0-db-create-ll6h7"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.072278 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phgzd\" (UniqueName: \"kubernetes.io/projected/4261462d-4b8e-4739-b79f-882c109aa8be-kube-api-access-phgzd\") pod \"nova-api-6bad-account-create-2pbld\" (UID: \"4261462d-4b8e-4739-b79f-882c109aa8be\") " pod="openstack/nova-api-6bad-account-create-2pbld"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.077234 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lsqmz\" (UniqueName: \"kubernetes.io/projected/3d9195a1-3aeb-466d-970a-8abd134135c8-kube-api-access-lsqmz\") pod \"nova-cell0-db-create-ll6h7\" (UID: \"3d9195a1-3aeb-466d-970a-8abd134135c8\") " pod="openstack/nova-cell0-db-create-ll6h7"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.139859 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-6bad-account-create-2pbld"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.148575 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-991b-account-create-kdxhh"]
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.150110 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-991b-account-create-kdxhh"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.151589 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.151641 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a3668763-e276-4dc6-bacc-3854d2a49983-operator-scripts\") pod \"nova-cell1-db-create-4ndrh\" (UID: \"a3668763-e276-4dc6-bacc-3854d2a49983\") " pod="openstack/nova-cell1-db-create-4ndrh"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.151664 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6vx8\" (UniqueName: \"kubernetes.io/projected/a3668763-e276-4dc6-bacc-3854d2a49983-kube-api-access-b6vx8\") pod \"nova-cell1-db-create-4ndrh\" (UID: \"a3668763-e276-4dc6-bacc-3854d2a49983\") " pod="openstack/nova-cell1-db-create-4ndrh"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.151702 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2fzb\" (UniqueName: \"kubernetes.io/projected/1db82525-47aa-465b-b638-0d954b02e9b2-kube-api-access-v2fzb\") pod \"nova-cell0-e925-account-create-wkg4s\" (UID: \"1db82525-47aa-465b-b638-0d954b02e9b2\") " pod="openstack/nova-cell0-e925-account-create-wkg4s"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.151726 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/603378ac-d3a5-43ec-bd0f-b4237683f553-config-data\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.152519 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.154102 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5bk4\" (UniqueName: \"kubernetes.io/projected/603378ac-d3a5-43ec-bd0f-b4237683f553-kube-api-access-m5bk4\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.154171 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/603378ac-d3a5-43ec-bd0f-b4237683f553-scripts\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.154231 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1db82525-47aa-465b-b638-0d954b02e9b2-operator-scripts\") pod \"nova-cell0-e925-account-create-wkg4s\" (UID: \"1db82525-47aa-465b-b638-0d954b02e9b2\") " pod="openstack/nova-cell0-e925-account-create-wkg4s"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.154257 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/603378ac-d3a5-43ec-bd0f-b4237683f553-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.154356 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/603378ac-d3a5-43ec-bd0f-b4237683f553-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.154407 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/603378ac-d3a5-43ec-bd0f-b4237683f553-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.154458 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/603378ac-d3a5-43ec-bd0f-b4237683f553-logs\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.159438 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1db82525-47aa-465b-b638-0d954b02e9b2-operator-scripts\") pod \"nova-cell0-e925-account-create-wkg4s\" (UID: \"1db82525-47aa-465b-b638-0d954b02e9b2\") " pod="openstack/nova-cell0-e925-account-create-wkg4s"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.159727 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-991b-account-create-kdxhh"]
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.197763 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2fzb\" (UniqueName: \"kubernetes.io/projected/1db82525-47aa-465b-b638-0d954b02e9b2-kube-api-access-v2fzb\") pod \"nova-cell0-e925-account-create-wkg4s\" (UID: \"1db82525-47aa-465b-b638-0d954b02e9b2\") " pod="openstack/nova-cell0-e925-account-create-wkg4s"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.220538 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-ll6h7"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.280167 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.280232 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7msqh\" (UniqueName: \"kubernetes.io/projected/8c17f596-473c-49f6-b2f2-56ee94f47c1b-kube-api-access-7msqh\") pod \"nova-cell1-991b-account-create-kdxhh\" (UID: \"8c17f596-473c-49f6-b2f2-56ee94f47c1b\") " pod="openstack/nova-cell1-991b-account-create-kdxhh"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.280268 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a3668763-e276-4dc6-bacc-3854d2a49983-operator-scripts\") pod \"nova-cell1-db-create-4ndrh\" (UID: \"a3668763-e276-4dc6-bacc-3854d2a49983\") " pod="openstack/nova-cell1-db-create-4ndrh"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.280293 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6vx8\" (UniqueName: \"kubernetes.io/projected/a3668763-e276-4dc6-bacc-3854d2a49983-kube-api-access-b6vx8\") pod \"nova-cell1-db-create-4ndrh\" (UID: \"a3668763-e276-4dc6-bacc-3854d2a49983\") " pod="openstack/nova-cell1-db-create-4ndrh"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.280341 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/603378ac-d3a5-43ec-bd0f-b4237683f553-config-data\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.280363 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c17f596-473c-49f6-b2f2-56ee94f47c1b-operator-scripts\") pod \"nova-cell1-991b-account-create-kdxhh\" (UID: \"8c17f596-473c-49f6-b2f2-56ee94f47c1b\") " pod="openstack/nova-cell1-991b-account-create-kdxhh"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.280395 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5bk4\" (UniqueName: \"kubernetes.io/projected/603378ac-d3a5-43ec-bd0f-b4237683f553-kube-api-access-m5bk4\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.280415 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/603378ac-d3a5-43ec-bd0f-b4237683f553-scripts\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.280463 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/603378ac-d3a5-43ec-bd0f-b4237683f553-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.280524 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/603378ac-d3a5-43ec-bd0f-b4237683f553-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.280553 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/603378ac-d3a5-43ec-bd0f-b4237683f553-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.280585 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/603378ac-d3a5-43ec-bd0f-b4237683f553-logs\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.281115 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/603378ac-d3a5-43ec-bd0f-b4237683f553-logs\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.281145 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a3668763-e276-4dc6-bacc-3854d2a49983-operator-scripts\") pod \"nova-cell1-db-create-4ndrh\" (UID: \"a3668763-e276-4dc6-bacc-3854d2a49983\") " pod="openstack/nova-cell1-db-create-4ndrh"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.281565 4687 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.289382 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-e925-account-create-wkg4s"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.300604 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/603378ac-d3a5-43ec-bd0f-b4237683f553-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.308197 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6vx8\" (UniqueName: \"kubernetes.io/projected/a3668763-e276-4dc6-bacc-3854d2a49983-kube-api-access-b6vx8\") pod \"nova-cell1-db-create-4ndrh\" (UID: \"a3668763-e276-4dc6-bacc-3854d2a49983\") " pod="openstack/nova-cell1-db-create-4ndrh"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.309654 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/603378ac-d3a5-43ec-bd0f-b4237683f553-config-data\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.324375 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/603378ac-d3a5-43ec-bd0f-b4237683f553-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.324821 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/603378ac-d3a5-43ec-bd0f-b4237683f553-scripts\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.325409 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/603378ac-d3a5-43ec-bd0f-b4237683f553-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.347053 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5bk4\" (UniqueName: \"kubernetes.io/projected/603378ac-d3a5-43ec-bd0f-b4237683f553-kube-api-access-m5bk4\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.349972 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"603378ac-d3a5-43ec-bd0f-b4237683f553\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.383880 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7msqh\" (UniqueName: \"kubernetes.io/projected/8c17f596-473c-49f6-b2f2-56ee94f47c1b-kube-api-access-7msqh\") pod \"nova-cell1-991b-account-create-kdxhh\" (UID: \"8c17f596-473c-49f6-b2f2-56ee94f47c1b\") " pod="openstack/nova-cell1-991b-account-create-kdxhh"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.383973 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c17f596-473c-49f6-b2f2-56ee94f47c1b-operator-scripts\") pod \"nova-cell1-991b-account-create-kdxhh\" (UID: \"8c17f596-473c-49f6-b2f2-56ee94f47c1b\") " pod="openstack/nova-cell1-991b-account-create-kdxhh"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.384727 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c17f596-473c-49f6-b2f2-56ee94f47c1b-operator-scripts\") pod \"nova-cell1-991b-account-create-kdxhh\" (UID: \"8c17f596-473c-49f6-b2f2-56ee94f47c1b\") " pod="openstack/nova-cell1-991b-account-create-kdxhh"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.408036 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7msqh\" (UniqueName: \"kubernetes.io/projected/8c17f596-473c-49f6-b2f2-56ee94f47c1b-kube-api-access-7msqh\") pod \"nova-cell1-991b-account-create-kdxhh\" (UID: \"8c17f596-473c-49f6-b2f2-56ee94f47c1b\") " pod="openstack/nova-cell1-991b-account-create-kdxhh"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.446519 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.453937 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-4ndrh"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.587465 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-combined-ca-bundle\") pod \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") "
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.589169 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-httpd-run\") pod \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") "
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.589225 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-logs\") pod \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") "
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.589264 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-config-data\") pod \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") "
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.589301 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xd2bx\" (UniqueName: \"kubernetes.io/projected/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-kube-api-access-xd2bx\") pod \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") "
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.589338 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") "
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.589358 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-internal-tls-certs\") pod \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") "
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.589408 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-scripts\") pod \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\" (UID: \"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b\") "
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.590584 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b" (UID: "6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.590989 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-logs" (OuterVolumeSpecName: "logs") pod "6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b" (UID: "6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.592386 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.601286 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-kube-api-access-xd2bx" (OuterVolumeSpecName: "kube-api-access-xd2bx") pod "6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b" (UID: "6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b"). InnerVolumeSpecName "kube-api-access-xd2bx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.619309 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-scripts" (OuterVolumeSpecName: "scripts") pod "6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b" (UID: "6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.633225 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance") pod "6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b" (UID: "6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.657912 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-991b-account-create-kdxhh"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.667698 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b" (UID: "6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.694777 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xd2bx\" (UniqueName: \"kubernetes.io/projected/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-kube-api-access-xd2bx\") on node \"crc\" DevicePath \"\""
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.694820 4687 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" "
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.694835 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.694844 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.694852 4687 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.694861 4687 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-logs\") on node \"crc\" DevicePath \"\""
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.748154 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="639af5c1-faaf-45e2-b75e-6031913ffdb9" path="/var/lib/kubelet/pods/639af5c1-faaf-45e2-b75e-6031913ffdb9/volumes"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.753839 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-nnzf8"]
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.777714 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-config-data" (OuterVolumeSpecName: "config-data") pod "6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b" (UID: "6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:24:19 crc kubenswrapper[4687]: W1125 09:24:19.792987 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf850ec56_21a5_4782_8aa3_2a1f53ddadb2.slice/crio-04653c62656f5b60c57e537610a109ce5b42f18799ba63aa6676b35b391703af WatchSource:0}: Error finding container 04653c62656f5b60c57e537610a109ce5b42f18799ba63aa6676b35b391703af: Status 404 returned error can't find the container with id 04653c62656f5b60c57e537610a109ce5b42f18799ba63aa6676b35b391703af
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.801709 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.803968 4687 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.807152 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b" (UID: "6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.834104 4687 generic.go:334] "Generic (PLEG): container finished" podID="6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b" containerID="bc8e5baa0138b8324cec2648c0681f3aedf3d15ce1d321f2e6bba60401d4e86a" exitCode=0
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.834206 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b","Type":"ContainerDied","Data":"bc8e5baa0138b8324cec2648c0681f3aedf3d15ce1d321f2e6bba60401d4e86a"}
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.834225 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.834248 4687 scope.go:117] "RemoveContainer" containerID="bc8e5baa0138b8324cec2648c0681f3aedf3d15ce1d321f2e6bba60401d4e86a"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.834237 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b","Type":"ContainerDied","Data":"dcc7272a63db41f14f73046d8acbf7505f24845e363bf61f9cb7a74003d2dea4"}
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.850772 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457","Type":"ContainerStarted","Data":"ea8bd60d99f129b660274b1b6ebddd7d7adfbe698613324272937802a50fd44d"}
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.906473 4687 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\""
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.906533 4687 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.913948 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.935629 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.944568 4687 scope.go:117] "RemoveContainer" containerID="8e9dc2fe748b29eb8444030972d323442905c30915811bcabf006b1aa63fa220"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.955966 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 09:24:19 crc kubenswrapper[4687]: E1125 09:24:19.956328 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b" containerName="glance-httpd"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.956339 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b" containerName="glance-httpd"
Nov 25 09:24:19 crc kubenswrapper[4687]: E1125 09:24:19.956372 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b" containerName="glance-log"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.956378 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b" containerName="glance-log"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.957214 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b" containerName="glance-log"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.957231 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b" containerName="glance-httpd"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.959585 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.961599 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.961791 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.981724 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-6bad-account-create-2pbld"]
Nov 25 09:24:19 crc kubenswrapper[4687]: I1125 09:24:19.992574 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.040642 4687 scope.go:117] "RemoveContainer" containerID="bc8e5baa0138b8324cec2648c0681f3aedf3d15ce1d321f2e6bba60401d4e86a"
Nov 25 09:24:20 crc kubenswrapper[4687]: E1125 09:24:20.042681 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc8e5baa0138b8324cec2648c0681f3aedf3d15ce1d321f2e6bba60401d4e86a\": container with ID starting with bc8e5baa0138b8324cec2648c0681f3aedf3d15ce1d321f2e6bba60401d4e86a not found: ID does not exist" containerID="bc8e5baa0138b8324cec2648c0681f3aedf3d15ce1d321f2e6bba60401d4e86a"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.042720 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc8e5baa0138b8324cec2648c0681f3aedf3d15ce1d321f2e6bba60401d4e86a"} err="failed to get container status \"bc8e5baa0138b8324cec2648c0681f3aedf3d15ce1d321f2e6bba60401d4e86a\": rpc error: code = NotFound desc = could not find container \"bc8e5baa0138b8324cec2648c0681f3aedf3d15ce1d321f2e6bba60401d4e86a\": container with ID starting with bc8e5baa0138b8324cec2648c0681f3aedf3d15ce1d321f2e6bba60401d4e86a not found: ID does not exist"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.042751 4687 scope.go:117] "RemoveContainer" containerID="8e9dc2fe748b29eb8444030972d323442905c30915811bcabf006b1aa63fa220"
Nov 25 09:24:20 crc kubenswrapper[4687]: E1125 09:24:20.045239 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e9dc2fe748b29eb8444030972d323442905c30915811bcabf006b1aa63fa220\": container with ID starting with 8e9dc2fe748b29eb8444030972d323442905c30915811bcabf006b1aa63fa220 not found: ID does not exist" containerID="8e9dc2fe748b29eb8444030972d323442905c30915811bcabf006b1aa63fa220"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.045265 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e9dc2fe748b29eb8444030972d323442905c30915811bcabf006b1aa63fa220"} err="failed to get container status \"8e9dc2fe748b29eb8444030972d323442905c30915811bcabf006b1aa63fa220\": rpc error: code = NotFound desc = could not find container \"8e9dc2fe748b29eb8444030972d323442905c30915811bcabf006b1aa63fa220\": container with ID starting with 8e9dc2fe748b29eb8444030972d323442905c30915811bcabf006b1aa63fa220 not found: ID does not exist"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.070118 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-e925-account-create-wkg4s"]
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.093586 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-ll6h7"]
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.109613 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.110030 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/255349d3-1260-430d-a74a-2fa4027d92b5-config-data\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.110064 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/255349d3-1260-430d-a74a-2fa4027d92b5-scripts\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.110111 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ch9sq\" (UniqueName: \"kubernetes.io/projected/255349d3-1260-430d-a74a-2fa4027d92b5-kube-api-access-ch9sq\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.110163 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/255349d3-1260-430d-a74a-2fa4027d92b5-logs\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.110213 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/255349d3-1260-430d-a74a-2fa4027d92b5-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.110259 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/255349d3-1260-430d-a74a-2fa4027d92b5-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.110286 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/255349d3-1260-430d-a74a-2fa4027d92b5-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.250634 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/255349d3-1260-430d-a74a-2fa4027d92b5-logs\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.250713 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/255349d3-1260-430d-a74a-2fa4027d92b5-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.250753 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/255349d3-1260-430d-a74a-2fa4027d92b5-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.250784 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/255349d3-1260-430d-a74a-2fa4027d92b5-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.250824 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.250876 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/255349d3-1260-430d-a74a-2fa4027d92b5-config-data\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.250901 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/255349d3-1260-430d-a74a-2fa4027d92b5-scripts\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.250937 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ch9sq\" (UniqueName: \"kubernetes.io/projected/255349d3-1260-430d-a74a-2fa4027d92b5-kube-api-access-ch9sq\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.251568 4687 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.255763 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/255349d3-1260-430d-a74a-2fa4027d92b5-logs\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.255982 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/255349d3-1260-430d-a74a-2fa4027d92b5-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.260637 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/255349d3-1260-430d-a74a-2fa4027d92b5-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.266276 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/255349d3-1260-430d-a74a-2fa4027d92b5-scripts\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.268724 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/255349d3-1260-430d-a74a-2fa4027d92b5-config-data\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.287233 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/255349d3-1260-430d-a74a-2fa4027d92b5-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.318212 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ch9sq\" (UniqueName: \"kubernetes.io/projected/255349d3-1260-430d-a74a-2fa4027d92b5-kube-api-access-ch9sq\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.319763 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-4ndrh"]
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.366935 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"255349d3-1260-430d-a74a-2fa4027d92b5\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.512863 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-991b-account-create-kdxhh"]
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.524250 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.732610 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.879728 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457","Type":"ContainerStarted","Data":"431647bb5a94ba83ebdb5334e430fcba71f052f2cd2c2ec3a9db257c615b9701"}
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.879888 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" containerName="ceilometer-central-agent" containerID="cri-o://672bc54fd6e8d9355659ba7cc4dfb76c01599310bb6adf00d8db903072dce0c8" gracePeriod=30
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.880135 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.880315 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" containerName="proxy-httpd" containerID="cri-o://431647bb5a94ba83ebdb5334e430fcba71f052f2cd2c2ec3a9db257c615b9701" gracePeriod=30
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.880403 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" containerName="sg-core" containerID="cri-o://ea8bd60d99f129b660274b1b6ebddd7d7adfbe698613324272937802a50fd44d" gracePeriod=30
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.880382 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" containerName="ceilometer-notification-agent" containerID="cri-o://20c8d7c53e8c1243047227417a10d2abc6e193b912587212062828db3b95d181" gracePeriod=30
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.928141 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-991b-account-create-kdxhh" event={"ID":"8c17f596-473c-49f6-b2f2-56ee94f47c1b","Type":"ContainerStarted","Data":"7fbad29cb79250ca8b9ba033fa65403924ed6198ade23d2a20f9e346392b954b"}
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.928173 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-991b-account-create-kdxhh" event={"ID":"8c17f596-473c-49f6-b2f2-56ee94f47c1b","Type":"ContainerStarted","Data":"5af0c187c1da3b59e77daf46c581798b201bc04ce630c5759490e2692798a74d"}
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.931907 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"603378ac-d3a5-43ec-bd0f-b4237683f553","Type":"ContainerStarted","Data":"142c8e2827e2be282bcf68f635740e292c38c87106085bd635e6f7bcf25d47ee"}
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.936301 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-4ndrh" event={"ID":"a3668763-e276-4dc6-bacc-3854d2a49983","Type":"ContainerStarted","Data":"ed6f2a783e4cc6b5d2e8cb7d015481b22894333f8f1cbf4cc5a1a94e4bcfc3ec"}
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.936354 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-4ndrh" event={"ID":"a3668763-e276-4dc6-bacc-3854d2a49983","Type":"ContainerStarted","Data":"d76def7052f1091fbdc38eb95d3fc7e8589c46ab32152a193ff12c69b23dcaa0"}
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.945390 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.472499243 podStartE2EDuration="5.945373302s" podCreationTimestamp="2025-11-25 09:24:15 +0000 UTC" firstStartedPulling="2025-11-25 09:24:16.646790619 +0000 UTC m=+1251.700430337" lastFinishedPulling="2025-11-25 09:24:20.119664678 +0000 UTC m=+1255.173304396" observedRunningTime="2025-11-25 09:24:20.918279064 +0000 UTC m=+1255.971918782" watchObservedRunningTime="2025-11-25 09:24:20.945373302 +0000 UTC m=+1255.999013020"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.946196 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-991b-account-create-kdxhh" podStartSLOduration=1.946190295 podStartE2EDuration="1.946190295s" podCreationTimestamp="2025-11-25 09:24:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:24:20.941836406 +0000 UTC m=+1255.995476124" watchObservedRunningTime="2025-11-25 09:24:20.946190295 +0000 UTC m=+1255.999830013"
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.946887 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-e925-account-create-wkg4s" event={"ID":"1db82525-47aa-465b-b638-0d954b02e9b2","Type":"ContainerStarted","Data":"63d830397315354ba571b8773524134aca212307a3a1921cfdf3cb8b70218a22"}
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.946923 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-e925-account-create-wkg4s" event={"ID":"1db82525-47aa-465b-b638-0d954b02e9b2","Type":"ContainerStarted","Data":"5c2be4cc33cc93bcaa053a059d60e767349ebd30be78e8cb019dfdd6f7ee9e8e"}
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.952666 4687 generic.go:334] "Generic (PLEG): container finished" podID="f850ec56-21a5-4782-8aa3-2a1f53ddadb2" containerID="d5f98ca1adf1eff43472d45aa9dc38757306f456cf2c04649185d29a0443ccb1" exitCode=0
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.952763 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-nnzf8" event={"ID":"f850ec56-21a5-4782-8aa3-2a1f53ddadb2","Type":"ContainerDied","Data":"d5f98ca1adf1eff43472d45aa9dc38757306f456cf2c04649185d29a0443ccb1"}
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.962836 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-nnzf8" event={"ID":"f850ec56-21a5-4782-8aa3-2a1f53ddadb2","Type":"ContainerStarted","Data":"04653c62656f5b60c57e537610a109ce5b42f18799ba63aa6676b35b391703af"}
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.976153 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-ll6h7" event={"ID":"3d9195a1-3aeb-466d-970a-8abd134135c8","Type":"ContainerStarted","Data":"2d95512df8c52f6c6b354f331404d397597558cbd227487b04c0a69a3760675b"}
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.976211 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-ll6h7" event={"ID":"3d9195a1-3aeb-466d-970a-8abd134135c8","Type":"ContainerStarted","Data":"20b68fe3250e055785a0c3b7d67e54c7992c97c8590ab03f22486e83c299369a"}
Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.977746 4687
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-4ndrh" podStartSLOduration=2.9777224650000003 podStartE2EDuration="2.977722465s" podCreationTimestamp="2025-11-25 09:24:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:24:20.957142394 +0000 UTC m=+1256.010782112" watchObservedRunningTime="2025-11-25 09:24:20.977722465 +0000 UTC m=+1256.031362183" Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.983540 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-6bad-account-create-2pbld" event={"ID":"4261462d-4b8e-4739-b79f-882c109aa8be","Type":"ContainerStarted","Data":"213bf437309e42cd8989c538e3655f49beee4fefe8c494e9af5b52e4c54d4244"} Nov 25 09:24:20 crc kubenswrapper[4687]: I1125 09:24:20.983586 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-6bad-account-create-2pbld" event={"ID":"4261462d-4b8e-4739-b79f-882c109aa8be","Type":"ContainerStarted","Data":"ce9bb7d4e2aece38e15520becf5eea9559274b219c4abd380af1b455dcac8d7e"} Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.024915 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-e925-account-create-wkg4s" podStartSLOduration=3.024895882 podStartE2EDuration="3.024895882s" podCreationTimestamp="2025-11-25 09:24:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:24:20.994641366 +0000 UTC m=+1256.048281084" watchObservedRunningTime="2025-11-25 09:24:21.024895882 +0000 UTC m=+1256.078535600" Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.046970 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-6bad-account-create-2pbld" podStartSLOduration=3.046950034 podStartE2EDuration="3.046950034s" podCreationTimestamp="2025-11-25 09:24:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:24:21.012008601 +0000 UTC m=+1256.065648309" watchObservedRunningTime="2025-11-25 09:24:21.046950034 +0000 UTC m=+1256.100589752" Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.050483 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-ll6h7" podStartSLOduration=3.050473019 podStartE2EDuration="3.050473019s" podCreationTimestamp="2025-11-25 09:24:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:24:21.029581169 +0000 UTC m=+1256.083220887" watchObservedRunningTime="2025-11-25 09:24:21.050473019 +0000 UTC m=+1256.104112737" Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.117137 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:24:21 crc kubenswrapper[4687]: W1125 09:24:21.161603 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod255349d3_1260_430d_a74a_2fa4027d92b5.slice/crio-a94f81aaa7a20d9390dc80a220d6a7baf3609a268b1a75d7469c5ea8a306399e WatchSource:0}: Error finding container a94f81aaa7a20d9390dc80a220d6a7baf3609a268b1a75d7469c5ea8a306399e: Status 404 returned error can't find the container with id 
a94f81aaa7a20d9390dc80a220d6a7baf3609a268b1a75d7469c5ea8a306399e Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.630779 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.692689 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2f22d\" (UniqueName: \"kubernetes.io/projected/ea89f490-5a54-46eb-9b1c-1eb96dd181da-kube-api-access-2f22d\") pod \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.692828 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ea89f490-5a54-46eb-9b1c-1eb96dd181da-horizon-secret-key\") pod \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.692868 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ea89f490-5a54-46eb-9b1c-1eb96dd181da-horizon-tls-certs\") pod \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.692915 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ea89f490-5a54-46eb-9b1c-1eb96dd181da-config-data\") pod \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.692953 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ea89f490-5a54-46eb-9b1c-1eb96dd181da-logs\") pod \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.693491 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ea89f490-5a54-46eb-9b1c-1eb96dd181da-scripts\") pod \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.694071 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea89f490-5a54-46eb-9b1c-1eb96dd181da-logs" (OuterVolumeSpecName: "logs") pod "ea89f490-5a54-46eb-9b1c-1eb96dd181da" (UID: "ea89f490-5a54-46eb-9b1c-1eb96dd181da"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.699196 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea89f490-5a54-46eb-9b1c-1eb96dd181da-combined-ca-bundle\") pod \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\" (UID: \"ea89f490-5a54-46eb-9b1c-1eb96dd181da\") " Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.700301 4687 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ea89f490-5a54-46eb-9b1c-1eb96dd181da-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.718755 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea89f490-5a54-46eb-9b1c-1eb96dd181da-kube-api-access-2f22d" (OuterVolumeSpecName: "kube-api-access-2f22d") pod "ea89f490-5a54-46eb-9b1c-1eb96dd181da" (UID: "ea89f490-5a54-46eb-9b1c-1eb96dd181da"). InnerVolumeSpecName "kube-api-access-2f22d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.725461 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea89f490-5a54-46eb-9b1c-1eb96dd181da-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "ea89f490-5a54-46eb-9b1c-1eb96dd181da" (UID: "ea89f490-5a54-46eb-9b1c-1eb96dd181da"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.754782 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b" path="/var/lib/kubelet/pods/6c6f77df-fa4a-4b50-b4bd-82d7ac284b4b/volumes" Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.754844 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea89f490-5a54-46eb-9b1c-1eb96dd181da-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ea89f490-5a54-46eb-9b1c-1eb96dd181da" (UID: "ea89f490-5a54-46eb-9b1c-1eb96dd181da"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.768079 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea89f490-5a54-46eb-9b1c-1eb96dd181da-scripts" (OuterVolumeSpecName: "scripts") pod "ea89f490-5a54-46eb-9b1c-1eb96dd181da" (UID: "ea89f490-5a54-46eb-9b1c-1eb96dd181da"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.790315 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea89f490-5a54-46eb-9b1c-1eb96dd181da-config-data" (OuterVolumeSpecName: "config-data") pod "ea89f490-5a54-46eb-9b1c-1eb96dd181da" (UID: "ea89f490-5a54-46eb-9b1c-1eb96dd181da"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.804966 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea89f490-5a54-46eb-9b1c-1eb96dd181da-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.805016 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2f22d\" (UniqueName: \"kubernetes.io/projected/ea89f490-5a54-46eb-9b1c-1eb96dd181da-kube-api-access-2f22d\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.805031 4687 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ea89f490-5a54-46eb-9b1c-1eb96dd181da-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.805040 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ea89f490-5a54-46eb-9b1c-1eb96dd181da-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.805048 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ea89f490-5a54-46eb-9b1c-1eb96dd181da-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.829091 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea89f490-5a54-46eb-9b1c-1eb96dd181da-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "ea89f490-5a54-46eb-9b1c-1eb96dd181da" (UID: "ea89f490-5a54-46eb-9b1c-1eb96dd181da"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:24:21 crc kubenswrapper[4687]: I1125 09:24:21.906943 4687 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ea89f490-5a54-46eb-9b1c-1eb96dd181da-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.020562 4687 generic.go:334] "Generic (PLEG): container finished" podID="3d9195a1-3aeb-466d-970a-8abd134135c8" containerID="2d95512df8c52f6c6b354f331404d397597558cbd227487b04c0a69a3760675b" exitCode=0 Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.020672 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-ll6h7" event={"ID":"3d9195a1-3aeb-466d-970a-8abd134135c8","Type":"ContainerDied","Data":"2d95512df8c52f6c6b354f331404d397597558cbd227487b04c0a69a3760675b"} Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.034067 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"255349d3-1260-430d-a74a-2fa4027d92b5","Type":"ContainerStarted","Data":"04a24be5fb613213743ec7ca23ffa0df5590c3fb589ba8ba9b3a4cb7e311bd72"} Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.034120 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"255349d3-1260-430d-a74a-2fa4027d92b5","Type":"ContainerStarted","Data":"a94f81aaa7a20d9390dc80a220d6a7baf3609a268b1a75d7469c5ea8a306399e"} Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.036466 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-6bad-account-create-2pbld" event={"ID":"4261462d-4b8e-4739-b79f-882c109aa8be","Type":"ContainerDied","Data":"213bf437309e42cd8989c538e3655f49beee4fefe8c494e9af5b52e4c54d4244"} Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.036543 4687 generic.go:334] "Generic (PLEG): container finished" podID="4261462d-4b8e-4739-b79f-882c109aa8be" containerID="213bf437309e42cd8989c538e3655f49beee4fefe8c494e9af5b52e4c54d4244" exitCode=0 Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.038901 4687 generic.go:334] "Generic (PLEG): container finished" podID="1db82525-47aa-465b-b638-0d954b02e9b2" containerID="63d830397315354ba571b8773524134aca212307a3a1921cfdf3cb8b70218a22" exitCode=0 Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.038955 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-e925-account-create-wkg4s" event={"ID":"1db82525-47aa-465b-b638-0d954b02e9b2","Type":"ContainerDied","Data":"63d830397315354ba571b8773524134aca212307a3a1921cfdf3cb8b70218a22"} Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.051130 4687 generic.go:334] "Generic (PLEG): container finished" podID="8c17f596-473c-49f6-b2f2-56ee94f47c1b" containerID="7fbad29cb79250ca8b9ba033fa65403924ed6198ade23d2a20f9e346392b954b" exitCode=0 Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.051177 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-991b-account-create-kdxhh" event={"ID":"8c17f596-473c-49f6-b2f2-56ee94f47c1b","Type":"ContainerDied","Data":"7fbad29cb79250ca8b9ba033fa65403924ed6198ade23d2a20f9e346392b954b"} Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.070608 4687 generic.go:334] "Generic (PLEG): container finished" podID="ea89f490-5a54-46eb-9b1c-1eb96dd181da" containerID="104c046087fa3bf6c2aea654a6dd6e82a67ada49e25b5ccd82128193ef494c66" exitCode=137 Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 
09:24:22.070727 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7dc8446cb-d6wz7" event={"ID":"ea89f490-5a54-46eb-9b1c-1eb96dd181da","Type":"ContainerDied","Data":"104c046087fa3bf6c2aea654a6dd6e82a67ada49e25b5ccd82128193ef494c66"} Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.070765 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7dc8446cb-d6wz7" event={"ID":"ea89f490-5a54-46eb-9b1c-1eb96dd181da","Type":"ContainerDied","Data":"edf5b834f44317b053e8fb14665d28bd1dcb752f41fadafeaf32f4dabc68b005"} Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.070792 4687 scope.go:117] "RemoveContainer" containerID="de6dd6a7ef780dbd15e6c80b510b13d7a645a96dcdbc08bfde45bbfa30791aeb" Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.071100 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7dc8446cb-d6wz7" Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.077217 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"603378ac-d3a5-43ec-bd0f-b4237683f553","Type":"ContainerStarted","Data":"e8473bfed7bf49e90b9412885fc79b68c6b66fb70a3e165fde009766bc8c7e7d"} Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.082442 4687 generic.go:334] "Generic (PLEG): container finished" podID="a3668763-e276-4dc6-bacc-3854d2a49983" containerID="ed6f2a783e4cc6b5d2e8cb7d015481b22894333f8f1cbf4cc5a1a94e4bcfc3ec" exitCode=0 Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.082486 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-4ndrh" event={"ID":"a3668763-e276-4dc6-bacc-3854d2a49983","Type":"ContainerDied","Data":"ed6f2a783e4cc6b5d2e8cb7d015481b22894333f8f1cbf4cc5a1a94e4bcfc3ec"} Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.098386 4687 generic.go:334] "Generic (PLEG): container finished" podID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" containerID="ea8bd60d99f129b660274b1b6ebddd7d7adfbe698613324272937802a50fd44d" exitCode=2 Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.098673 4687 generic.go:334] "Generic (PLEG): container finished" podID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" containerID="20c8d7c53e8c1243047227417a10d2abc6e193b912587212062828db3b95d181" exitCode=0 Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.098470 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457","Type":"ContainerDied","Data":"ea8bd60d99f129b660274b1b6ebddd7d7adfbe698613324272937802a50fd44d"} Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.098788 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457","Type":"ContainerDied","Data":"20c8d7c53e8c1243047227417a10d2abc6e193b912587212062828db3b95d181"} Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.158441 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7dc8446cb-d6wz7"] Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.166586 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-7dc8446cb-d6wz7"] Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.359410 4687 scope.go:117] "RemoveContainer" containerID="104c046087fa3bf6c2aea654a6dd6e82a67ada49e25b5ccd82128193ef494c66" Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.461353 4687 scope.go:117] "RemoveContainer" 
containerID="de6dd6a7ef780dbd15e6c80b510b13d7a645a96dcdbc08bfde45bbfa30791aeb" Nov 25 09:24:22 crc kubenswrapper[4687]: E1125 09:24:22.461962 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de6dd6a7ef780dbd15e6c80b510b13d7a645a96dcdbc08bfde45bbfa30791aeb\": container with ID starting with de6dd6a7ef780dbd15e6c80b510b13d7a645a96dcdbc08bfde45bbfa30791aeb not found: ID does not exist" containerID="de6dd6a7ef780dbd15e6c80b510b13d7a645a96dcdbc08bfde45bbfa30791aeb" Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.461998 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de6dd6a7ef780dbd15e6c80b510b13d7a645a96dcdbc08bfde45bbfa30791aeb"} err="failed to get container status \"de6dd6a7ef780dbd15e6c80b510b13d7a645a96dcdbc08bfde45bbfa30791aeb\": rpc error: code = NotFound desc = could not find container \"de6dd6a7ef780dbd15e6c80b510b13d7a645a96dcdbc08bfde45bbfa30791aeb\": container with ID starting with de6dd6a7ef780dbd15e6c80b510b13d7a645a96dcdbc08bfde45bbfa30791aeb not found: ID does not exist" Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.462024 4687 scope.go:117] "RemoveContainer" containerID="104c046087fa3bf6c2aea654a6dd6e82a67ada49e25b5ccd82128193ef494c66" Nov 25 09:24:22 crc kubenswrapper[4687]: E1125 09:24:22.462221 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"104c046087fa3bf6c2aea654a6dd6e82a67ada49e25b5ccd82128193ef494c66\": container with ID starting with 104c046087fa3bf6c2aea654a6dd6e82a67ada49e25b5ccd82128193ef494c66 not found: ID does not exist" containerID="104c046087fa3bf6c2aea654a6dd6e82a67ada49e25b5ccd82128193ef494c66" Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.462247 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"104c046087fa3bf6c2aea654a6dd6e82a67ada49e25b5ccd82128193ef494c66"} err="failed to get container status \"104c046087fa3bf6c2aea654a6dd6e82a67ada49e25b5ccd82128193ef494c66\": rpc error: code = NotFound desc = could not find container \"104c046087fa3bf6c2aea654a6dd6e82a67ada49e25b5ccd82128193ef494c66\": container with ID starting with 104c046087fa3bf6c2aea654a6dd6e82a67ada49e25b5ccd82128193ef494c66 not found: ID does not exist" Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.471620 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-nnzf8" Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.619739 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f850ec56-21a5-4782-8aa3-2a1f53ddadb2-operator-scripts\") pod \"f850ec56-21a5-4782-8aa3-2a1f53ddadb2\" (UID: \"f850ec56-21a5-4782-8aa3-2a1f53ddadb2\") " Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.620100 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h2525\" (UniqueName: \"kubernetes.io/projected/f850ec56-21a5-4782-8aa3-2a1f53ddadb2-kube-api-access-h2525\") pod \"f850ec56-21a5-4782-8aa3-2a1f53ddadb2\" (UID: \"f850ec56-21a5-4782-8aa3-2a1f53ddadb2\") " Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.620643 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f850ec56-21a5-4782-8aa3-2a1f53ddadb2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f850ec56-21a5-4782-8aa3-2a1f53ddadb2" (UID: "f850ec56-21a5-4782-8aa3-2a1f53ddadb2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.629709 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f850ec56-21a5-4782-8aa3-2a1f53ddadb2-kube-api-access-h2525" (OuterVolumeSpecName: "kube-api-access-h2525") pod "f850ec56-21a5-4782-8aa3-2a1f53ddadb2" (UID: "f850ec56-21a5-4782-8aa3-2a1f53ddadb2"). InnerVolumeSpecName "kube-api-access-h2525". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.722046 4687 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f850ec56-21a5-4782-8aa3-2a1f53ddadb2-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:22 crc kubenswrapper[4687]: I1125 09:24:22.722081 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h2525\" (UniqueName: \"kubernetes.io/projected/f850ec56-21a5-4782-8aa3-2a1f53ddadb2-kube-api-access-h2525\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.108488 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-nnzf8" event={"ID":"f850ec56-21a5-4782-8aa3-2a1f53ddadb2","Type":"ContainerDied","Data":"04653c62656f5b60c57e537610a109ce5b42f18799ba63aa6676b35b391703af"} Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.108846 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="04653c62656f5b60c57e537610a109ce5b42f18799ba63aa6676b35b391703af" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.108556 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-nnzf8" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.111112 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"255349d3-1260-430d-a74a-2fa4027d92b5","Type":"ContainerStarted","Data":"5be2c807826f50242db34e3a073dcf7ae1503ee58f2de2ce41133f2b14e3197c"} Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.113181 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"603378ac-d3a5-43ec-bd0f-b4237683f553","Type":"ContainerStarted","Data":"a149841e47373cea6423b30e61e0c8c9ae9c54a2566422ffb2e267b11b899eb7"} Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.145644 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.145625185 podStartE2EDuration="4.145625185s" podCreationTimestamp="2025-11-25 09:24:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:24:23.141137283 +0000 UTC m=+1258.194777001" watchObservedRunningTime="2025-11-25 09:24:23.145625185 +0000 UTC m=+1258.199264903" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.167784 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.167766869 podStartE2EDuration="5.167766869s" podCreationTimestamp="2025-11-25 09:24:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:24:23.166744101 +0000 UTC m=+1258.220383819" watchObservedRunningTime="2025-11-25 09:24:23.167766869 +0000 UTC m=+1258.221406587" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.534634 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.557782 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6cc97f684c-lcsst" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.590205 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-6bad-account-create-2pbld" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.642002 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-phgzd\" (UniqueName: \"kubernetes.io/projected/4261462d-4b8e-4739-b79f-882c109aa8be-kube-api-access-phgzd\") pod \"4261462d-4b8e-4739-b79f-882c109aa8be\" (UID: \"4261462d-4b8e-4739-b79f-882c109aa8be\") " Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.642419 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4261462d-4b8e-4739-b79f-882c109aa8be-operator-scripts\") pod \"4261462d-4b8e-4739-b79f-882c109aa8be\" (UID: \"4261462d-4b8e-4739-b79f-882c109aa8be\") " Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.643629 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4261462d-4b8e-4739-b79f-882c109aa8be-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4261462d-4b8e-4739-b79f-882c109aa8be" (UID: "4261462d-4b8e-4739-b79f-882c109aa8be"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.650996 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4261462d-4b8e-4739-b79f-882c109aa8be-kube-api-access-phgzd" (OuterVolumeSpecName: "kube-api-access-phgzd") pod "4261462d-4b8e-4739-b79f-882c109aa8be" (UID: "4261462d-4b8e-4739-b79f-882c109aa8be"). InnerVolumeSpecName "kube-api-access-phgzd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.744452 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-phgzd\" (UniqueName: \"kubernetes.io/projected/4261462d-4b8e-4739-b79f-882c109aa8be-kube-api-access-phgzd\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.744478 4687 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4261462d-4b8e-4739-b79f-882c109aa8be-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.777339 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea89f490-5a54-46eb-9b1c-1eb96dd181da" path="/var/lib/kubelet/pods/ea89f490-5a54-46eb-9b1c-1eb96dd181da/volumes" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.822284 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-ll6h7" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.836690 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-4ndrh" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.843885 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-991b-account-create-kdxhh" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.844077 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.844110 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.844143 4687 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.844767 4687 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0e89ebd4720ffd5c135c8bc00be72ce7345dd6f93bd878517e70d876f94fe463"} pod="openshift-machine-config-operator/machine-config-daemon-vcqct" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.844818 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" containerID="cri-o://0e89ebd4720ffd5c135c8bc00be72ce7345dd6f93bd878517e70d876f94fe463" gracePeriod=600 Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.864352 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-e925-account-create-wkg4s" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.946426 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v2fzb\" (UniqueName: \"kubernetes.io/projected/1db82525-47aa-465b-b638-0d954b02e9b2-kube-api-access-v2fzb\") pod \"1db82525-47aa-465b-b638-0d954b02e9b2\" (UID: \"1db82525-47aa-465b-b638-0d954b02e9b2\") " Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.946600 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lsqmz\" (UniqueName: \"kubernetes.io/projected/3d9195a1-3aeb-466d-970a-8abd134135c8-kube-api-access-lsqmz\") pod \"3d9195a1-3aeb-466d-970a-8abd134135c8\" (UID: \"3d9195a1-3aeb-466d-970a-8abd134135c8\") " Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.946679 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7msqh\" (UniqueName: \"kubernetes.io/projected/8c17f596-473c-49f6-b2f2-56ee94f47c1b-kube-api-access-7msqh\") pod \"8c17f596-473c-49f6-b2f2-56ee94f47c1b\" (UID: \"8c17f596-473c-49f6-b2f2-56ee94f47c1b\") " Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.946746 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a3668763-e276-4dc6-bacc-3854d2a49983-operator-scripts\") pod \"a3668763-e276-4dc6-bacc-3854d2a49983\" (UID: \"a3668763-e276-4dc6-bacc-3854d2a49983\") " Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.946787 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1db82525-47aa-465b-b638-0d954b02e9b2-operator-scripts\") pod \"1db82525-47aa-465b-b638-0d954b02e9b2\" (UID: \"1db82525-47aa-465b-b638-0d954b02e9b2\") " Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.946823 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b6vx8\" (UniqueName: \"kubernetes.io/projected/a3668763-e276-4dc6-bacc-3854d2a49983-kube-api-access-b6vx8\") pod \"a3668763-e276-4dc6-bacc-3854d2a49983\" (UID: \"a3668763-e276-4dc6-bacc-3854d2a49983\") " Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.946853 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c17f596-473c-49f6-b2f2-56ee94f47c1b-operator-scripts\") pod \"8c17f596-473c-49f6-b2f2-56ee94f47c1b\" (UID: \"8c17f596-473c-49f6-b2f2-56ee94f47c1b\") " Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.946912 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d9195a1-3aeb-466d-970a-8abd134135c8-operator-scripts\") pod \"3d9195a1-3aeb-466d-970a-8abd134135c8\" (UID: \"3d9195a1-3aeb-466d-970a-8abd134135c8\") " Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.947594 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d9195a1-3aeb-466d-970a-8abd134135c8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3d9195a1-3aeb-466d-970a-8abd134135c8" (UID: "3d9195a1-3aeb-466d-970a-8abd134135c8"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.947664 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c17f596-473c-49f6-b2f2-56ee94f47c1b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8c17f596-473c-49f6-b2f2-56ee94f47c1b" (UID: "8c17f596-473c-49f6-b2f2-56ee94f47c1b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.947730 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a3668763-e276-4dc6-bacc-3854d2a49983-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a3668763-e276-4dc6-bacc-3854d2a49983" (UID: "a3668763-e276-4dc6-bacc-3854d2a49983"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.948073 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1db82525-47aa-465b-b638-0d954b02e9b2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1db82525-47aa-465b-b638-0d954b02e9b2" (UID: "1db82525-47aa-465b-b638-0d954b02e9b2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.950334 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1db82525-47aa-465b-b638-0d954b02e9b2-kube-api-access-v2fzb" (OuterVolumeSpecName: "kube-api-access-v2fzb") pod "1db82525-47aa-465b-b638-0d954b02e9b2" (UID: "1db82525-47aa-465b-b638-0d954b02e9b2"). InnerVolumeSpecName "kube-api-access-v2fzb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.951772 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d9195a1-3aeb-466d-970a-8abd134135c8-kube-api-access-lsqmz" (OuterVolumeSpecName: "kube-api-access-lsqmz") pod "3d9195a1-3aeb-466d-970a-8abd134135c8" (UID: "3d9195a1-3aeb-466d-970a-8abd134135c8"). InnerVolumeSpecName "kube-api-access-lsqmz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.952652 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3668763-e276-4dc6-bacc-3854d2a49983-kube-api-access-b6vx8" (OuterVolumeSpecName: "kube-api-access-b6vx8") pod "a3668763-e276-4dc6-bacc-3854d2a49983" (UID: "a3668763-e276-4dc6-bacc-3854d2a49983"). InnerVolumeSpecName "kube-api-access-b6vx8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:24:23 crc kubenswrapper[4687]: I1125 09:24:23.961063 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c17f596-473c-49f6-b2f2-56ee94f47c1b-kube-api-access-7msqh" (OuterVolumeSpecName: "kube-api-access-7msqh") pod "8c17f596-473c-49f6-b2f2-56ee94f47c1b" (UID: "8c17f596-473c-49f6-b2f2-56ee94f47c1b"). InnerVolumeSpecName "kube-api-access-7msqh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.049690 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lsqmz\" (UniqueName: \"kubernetes.io/projected/3d9195a1-3aeb-466d-970a-8abd134135c8-kube-api-access-lsqmz\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.049999 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7msqh\" (UniqueName: \"kubernetes.io/projected/8c17f596-473c-49f6-b2f2-56ee94f47c1b-kube-api-access-7msqh\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.050012 4687 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a3668763-e276-4dc6-bacc-3854d2a49983-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.050021 4687 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1db82525-47aa-465b-b638-0d954b02e9b2-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.050030 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b6vx8\" (UniqueName: \"kubernetes.io/projected/a3668763-e276-4dc6-bacc-3854d2a49983-kube-api-access-b6vx8\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.050057 4687 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c17f596-473c-49f6-b2f2-56ee94f47c1b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.050069 4687 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d9195a1-3aeb-466d-970a-8abd134135c8-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.050078 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v2fzb\" (UniqueName: \"kubernetes.io/projected/1db82525-47aa-465b-b638-0d954b02e9b2-kube-api-access-v2fzb\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.124329 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-ll6h7" event={"ID":"3d9195a1-3aeb-466d-970a-8abd134135c8","Type":"ContainerDied","Data":"20b68fe3250e055785a0c3b7d67e54c7992c97c8590ab03f22486e83c299369a"} Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.125741 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="20b68fe3250e055785a0c3b7d67e54c7992c97c8590ab03f22486e83c299369a" Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.124345 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-ll6h7" Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.126212 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-4ndrh" event={"ID":"a3668763-e276-4dc6-bacc-3854d2a49983","Type":"ContainerDied","Data":"d76def7052f1091fbdc38eb95d3fc7e8589c46ab32152a193ff12c69b23dcaa0"} Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.126245 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d76def7052f1091fbdc38eb95d3fc7e8589c46ab32152a193ff12c69b23dcaa0" Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.126279 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-4ndrh" Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.133658 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-6bad-account-create-2pbld" event={"ID":"4261462d-4b8e-4739-b79f-882c109aa8be","Type":"ContainerDied","Data":"ce9bb7d4e2aece38e15520becf5eea9559274b219c4abd380af1b455dcac8d7e"} Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.133702 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce9bb7d4e2aece38e15520becf5eea9559274b219c4abd380af1b455dcac8d7e" Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.133668 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-6bad-account-create-2pbld" Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.137227 4687 generic.go:334] "Generic (PLEG): container finished" podID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerID="0e89ebd4720ffd5c135c8bc00be72ce7345dd6f93bd878517e70d876f94fe463" exitCode=0 Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.137297 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerDied","Data":"0e89ebd4720ffd5c135c8bc00be72ce7345dd6f93bd878517e70d876f94fe463"} Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.137612 4687 scope.go:117] "RemoveContainer" containerID="775e1f554d9dd2a0b079c1ff7e2f05e88c335de1a345eef583910fe573bfcecf" Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.140027 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-e925-account-create-wkg4s" event={"ID":"1db82525-47aa-465b-b638-0d954b02e9b2","Type":"ContainerDied","Data":"5c2be4cc33cc93bcaa053a059d60e767349ebd30be78e8cb019dfdd6f7ee9e8e"} Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.140592 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5c2be4cc33cc93bcaa053a059d60e767349ebd30be78e8cb019dfdd6f7ee9e8e" Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.140089 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-e925-account-create-wkg4s" Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.142671 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-991b-account-create-kdxhh" Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.144288 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-991b-account-create-kdxhh" event={"ID":"8c17f596-473c-49f6-b2f2-56ee94f47c1b","Type":"ContainerDied","Data":"5af0c187c1da3b59e77daf46c581798b201bc04ce630c5759490e2692798a74d"} Nov 25 09:24:24 crc kubenswrapper[4687]: I1125 09:24:24.144318 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5af0c187c1da3b59e77daf46c581798b201bc04ce630c5759490e2692798a74d" Nov 25 09:24:25 crc kubenswrapper[4687]: I1125 09:24:25.154291 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerStarted","Data":"026b07db34bc04118ebe444d3596042b6afeddebbaa71ea4729e8d639abf5885"} Nov 25 09:24:28 crc kubenswrapper[4687]: I1125 09:24:28.181798 4687 generic.go:334] "Generic (PLEG): container finished" podID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" containerID="672bc54fd6e8d9355659ba7cc4dfb76c01599310bb6adf00d8db903072dce0c8" exitCode=0 Nov 25 09:24:28 crc kubenswrapper[4687]: I1125 09:24:28.181887 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457","Type":"ContainerDied","Data":"672bc54fd6e8d9355659ba7cc4dfb76c01599310bb6adf00d8db903072dce0c8"} Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.264261 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-wm5bb"] Nov 25 09:24:29 crc kubenswrapper[4687]: E1125 09:24:29.265778 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea89f490-5a54-46eb-9b1c-1eb96dd181da" containerName="horizon" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.265796 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea89f490-5a54-46eb-9b1c-1eb96dd181da" containerName="horizon" Nov 25 09:24:29 crc kubenswrapper[4687]: E1125 09:24:29.265814 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4261462d-4b8e-4739-b79f-882c109aa8be" containerName="mariadb-account-create" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.265823 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="4261462d-4b8e-4739-b79f-882c109aa8be" containerName="mariadb-account-create" Nov 25 09:24:29 crc kubenswrapper[4687]: E1125 09:24:29.265841 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea89f490-5a54-46eb-9b1c-1eb96dd181da" containerName="horizon-log" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.265849 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea89f490-5a54-46eb-9b1c-1eb96dd181da" containerName="horizon-log" Nov 25 09:24:29 crc kubenswrapper[4687]: E1125 09:24:29.265867 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3668763-e276-4dc6-bacc-3854d2a49983" containerName="mariadb-database-create" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.265874 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3668763-e276-4dc6-bacc-3854d2a49983" containerName="mariadb-database-create" Nov 25 09:24:29 crc kubenswrapper[4687]: E1125 09:24:29.265887 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1db82525-47aa-465b-b638-0d954b02e9b2" containerName="mariadb-account-create" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.265895 4687 
state_mem.go:107] "Deleted CPUSet assignment" podUID="1db82525-47aa-465b-b638-0d954b02e9b2" containerName="mariadb-account-create" Nov 25 09:24:29 crc kubenswrapper[4687]: E1125 09:24:29.265917 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d9195a1-3aeb-466d-970a-8abd134135c8" containerName="mariadb-database-create" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.265924 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d9195a1-3aeb-466d-970a-8abd134135c8" containerName="mariadb-database-create" Nov 25 09:24:29 crc kubenswrapper[4687]: E1125 09:24:29.265937 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f850ec56-21a5-4782-8aa3-2a1f53ddadb2" containerName="mariadb-database-create" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.265945 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="f850ec56-21a5-4782-8aa3-2a1f53ddadb2" containerName="mariadb-database-create" Nov 25 09:24:29 crc kubenswrapper[4687]: E1125 09:24:29.265958 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c17f596-473c-49f6-b2f2-56ee94f47c1b" containerName="mariadb-account-create" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.265965 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c17f596-473c-49f6-b2f2-56ee94f47c1b" containerName="mariadb-account-create" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.266179 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="1db82525-47aa-465b-b638-0d954b02e9b2" containerName="mariadb-account-create" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.266201 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="4261462d-4b8e-4739-b79f-882c109aa8be" containerName="mariadb-account-create" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.266220 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea89f490-5a54-46eb-9b1c-1eb96dd181da" containerName="horizon-log" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.266231 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea89f490-5a54-46eb-9b1c-1eb96dd181da" containerName="horizon" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.266246 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3668763-e276-4dc6-bacc-3854d2a49983" containerName="mariadb-database-create" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.266261 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="f850ec56-21a5-4782-8aa3-2a1f53ddadb2" containerName="mariadb-database-create" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.266271 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c17f596-473c-49f6-b2f2-56ee94f47c1b" containerName="mariadb-account-create" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.266285 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d9195a1-3aeb-466d-970a-8abd134135c8" containerName="mariadb-database-create" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.267129 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-wm5bb" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.268988 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-c7224" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.269569 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.270032 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.277897 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-wm5bb"] Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.353104 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79c7ec90-6869-4eed-9ced-6ba0adfe7965-config-data\") pod \"nova-cell0-conductor-db-sync-wm5bb\" (UID: \"79c7ec90-6869-4eed-9ced-6ba0adfe7965\") " pod="openstack/nova-cell0-conductor-db-sync-wm5bb" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.353278 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79c7ec90-6869-4eed-9ced-6ba0adfe7965-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-wm5bb\" (UID: \"79c7ec90-6869-4eed-9ced-6ba0adfe7965\") " pod="openstack/nova-cell0-conductor-db-sync-wm5bb" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.353341 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79c7ec90-6869-4eed-9ced-6ba0adfe7965-scripts\") pod \"nova-cell0-conductor-db-sync-wm5bb\" (UID: \"79c7ec90-6869-4eed-9ced-6ba0adfe7965\") " pod="openstack/nova-cell0-conductor-db-sync-wm5bb" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.353393 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnnth\" (UniqueName: \"kubernetes.io/projected/79c7ec90-6869-4eed-9ced-6ba0adfe7965-kube-api-access-qnnth\") pod \"nova-cell0-conductor-db-sync-wm5bb\" (UID: \"79c7ec90-6869-4eed-9ced-6ba0adfe7965\") " pod="openstack/nova-cell0-conductor-db-sync-wm5bb" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.455444 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnnth\" (UniqueName: \"kubernetes.io/projected/79c7ec90-6869-4eed-9ced-6ba0adfe7965-kube-api-access-qnnth\") pod \"nova-cell0-conductor-db-sync-wm5bb\" (UID: \"79c7ec90-6869-4eed-9ced-6ba0adfe7965\") " pod="openstack/nova-cell0-conductor-db-sync-wm5bb" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.455540 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79c7ec90-6869-4eed-9ced-6ba0adfe7965-config-data\") pod \"nova-cell0-conductor-db-sync-wm5bb\" (UID: \"79c7ec90-6869-4eed-9ced-6ba0adfe7965\") " pod="openstack/nova-cell0-conductor-db-sync-wm5bb" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.455660 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79c7ec90-6869-4eed-9ced-6ba0adfe7965-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-wm5bb\" 
(UID: \"79c7ec90-6869-4eed-9ced-6ba0adfe7965\") " pod="openstack/nova-cell0-conductor-db-sync-wm5bb" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.455693 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79c7ec90-6869-4eed-9ced-6ba0adfe7965-scripts\") pod \"nova-cell0-conductor-db-sync-wm5bb\" (UID: \"79c7ec90-6869-4eed-9ced-6ba0adfe7965\") " pod="openstack/nova-cell0-conductor-db-sync-wm5bb" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.461996 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79c7ec90-6869-4eed-9ced-6ba0adfe7965-scripts\") pod \"nova-cell0-conductor-db-sync-wm5bb\" (UID: \"79c7ec90-6869-4eed-9ced-6ba0adfe7965\") " pod="openstack/nova-cell0-conductor-db-sync-wm5bb" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.462320 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79c7ec90-6869-4eed-9ced-6ba0adfe7965-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-wm5bb\" (UID: \"79c7ec90-6869-4eed-9ced-6ba0adfe7965\") " pod="openstack/nova-cell0-conductor-db-sync-wm5bb" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.462678 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79c7ec90-6869-4eed-9ced-6ba0adfe7965-config-data\") pod \"nova-cell0-conductor-db-sync-wm5bb\" (UID: \"79c7ec90-6869-4eed-9ced-6ba0adfe7965\") " pod="openstack/nova-cell0-conductor-db-sync-wm5bb" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.477652 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnnth\" (UniqueName: \"kubernetes.io/projected/79c7ec90-6869-4eed-9ced-6ba0adfe7965-kube-api-access-qnnth\") pod \"nova-cell0-conductor-db-sync-wm5bb\" (UID: \"79c7ec90-6869-4eed-9ced-6ba0adfe7965\") " pod="openstack/nova-cell0-conductor-db-sync-wm5bb" Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.588928 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-wm5bb"
Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.594019 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.594070 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.639349 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 25 09:24:29 crc kubenswrapper[4687]: I1125 09:24:29.650105 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 25 09:24:30 crc kubenswrapper[4687]: I1125 09:24:30.095759 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-wm5bb"]
Nov 25 09:24:30 crc kubenswrapper[4687]: I1125 09:24:30.202258 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-wm5bb" event={"ID":"79c7ec90-6869-4eed-9ced-6ba0adfe7965","Type":"ContainerStarted","Data":"2e70d295acbc931f53ec0d904518bc5ab4908ed0607fd45c36bdd7f5dc43dddb"}
Nov 25 09:24:30 crc kubenswrapper[4687]: I1125 09:24:30.202478 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 25 09:24:30 crc kubenswrapper[4687]: I1125 09:24:30.202524 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 25 09:24:30 crc kubenswrapper[4687]: I1125 09:24:30.525833 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:30 crc kubenswrapper[4687]: I1125 09:24:30.525883 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:30 crc kubenswrapper[4687]: I1125 09:24:30.557073 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:30 crc kubenswrapper[4687]: I1125 09:24:30.573156 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:31 crc kubenswrapper[4687]: I1125 09:24:31.210752 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:31 crc kubenswrapper[4687]: I1125 09:24:31.210790 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:32 crc kubenswrapper[4687]: I1125 09:24:32.334600 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 25 09:24:32 crc kubenswrapper[4687]: I1125 09:24:32.334930 4687 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 25 09:24:32 crc kubenswrapper[4687]: I1125 09:24:32.341982 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 25 09:24:33 crc kubenswrapper[4687]: I1125 09:24:33.359474 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 25 09:24:33 crc kubenswrapper[4687]: I1125 09:24:33.359565 4687 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
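
The glance entries above trace the kubelet's probe sequencing: each startup probe first reports unhealthy, then started, after which readiness probing begins with an empty status until the first result comes back; the prober_manager "Failed to trigger a manual run" lines appear to mean a manual readiness run was requested while the probe worker was already busy. A minimal Go sketch of the gating idea only, with deliberately simplified, hypothetical types (not the kubelet's actual code):

package main

import "fmt"

// probeState is an illustrative reduction of kubelet probe gating:
// readiness results only take effect once the startup probe has passed.
type probeState struct {
	started bool // set when the startup probe succeeds
	ready   bool // last readiness result, meaningful only after started
}

func (p *probeState) observe(probe string, success bool) {
	switch probe {
	case "startup":
		if success {
			p.started = true // container is "started"; readiness takes over
		}
	case "readiness":
		if p.started {
			p.ready = success
		}
	}
}

func main() {
	var p probeState
	p.observe("startup", false) // status="unhealthy"
	p.observe("startup", true)  // status="started"
	p.observe("readiness", true)
	fmt.Println(p.ready) // true
}
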
manual run" probe="Readiness" Nov 25 09:24:33 crc kubenswrapper[4687]: I1125 09:24:33.419945 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 09:24:39 crc kubenswrapper[4687]: I1125 09:24:39.298181 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-wm5bb" event={"ID":"79c7ec90-6869-4eed-9ced-6ba0adfe7965","Type":"ContainerStarted","Data":"4de9d94df83cd6ce2747ad3482cd75aad445a50c7b3090b8554d29f5995714e9"} Nov 25 09:24:39 crc kubenswrapper[4687]: I1125 09:24:39.323554 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-wm5bb" podStartSLOduration=2.324105992 podStartE2EDuration="10.323538103s" podCreationTimestamp="2025-11-25 09:24:29 +0000 UTC" firstStartedPulling="2025-11-25 09:24:30.114314769 +0000 UTC m=+1265.167954487" lastFinishedPulling="2025-11-25 09:24:38.11374688 +0000 UTC m=+1273.167386598" observedRunningTime="2025-11-25 09:24:39.3171999 +0000 UTC m=+1274.370839618" watchObservedRunningTime="2025-11-25 09:24:39.323538103 +0000 UTC m=+1274.377177821" Nov 25 09:24:46 crc kubenswrapper[4687]: I1125 09:24:46.136923 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 09:24:49 crc kubenswrapper[4687]: I1125 09:24:49.389325 4687 generic.go:334] "Generic (PLEG): container finished" podID="79c7ec90-6869-4eed-9ced-6ba0adfe7965" containerID="4de9d94df83cd6ce2747ad3482cd75aad445a50c7b3090b8554d29f5995714e9" exitCode=0 Nov 25 09:24:49 crc kubenswrapper[4687]: I1125 09:24:49.389492 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-wm5bb" event={"ID":"79c7ec90-6869-4eed-9ced-6ba0adfe7965","Type":"ContainerDied","Data":"4de9d94df83cd6ce2747ad3482cd75aad445a50c7b3090b8554d29f5995714e9"} Nov 25 09:24:50 crc kubenswrapper[4687]: I1125 09:24:50.761790 4687 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 09:24:50 crc kubenswrapper[4687]: I1125 09:24:50.862889 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79c7ec90-6869-4eed-9ced-6ba0adfe7965-config-data\") pod \"79c7ec90-6869-4eed-9ced-6ba0adfe7965\" (UID: \"79c7ec90-6869-4eed-9ced-6ba0adfe7965\") "
Nov 25 09:24:50 crc kubenswrapper[4687]: I1125 09:24:50.862945 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79c7ec90-6869-4eed-9ced-6ba0adfe7965-combined-ca-bundle\") pod \"79c7ec90-6869-4eed-9ced-6ba0adfe7965\" (UID: \"79c7ec90-6869-4eed-9ced-6ba0adfe7965\") "
Nov 25 09:24:50 crc kubenswrapper[4687]: I1125 09:24:50.862986 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79c7ec90-6869-4eed-9ced-6ba0adfe7965-scripts\") pod \"79c7ec90-6869-4eed-9ced-6ba0adfe7965\" (UID: \"79c7ec90-6869-4eed-9ced-6ba0adfe7965\") "
Nov 25 09:24:50 crc kubenswrapper[4687]: I1125 09:24:50.863060 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qnnth\" (UniqueName: \"kubernetes.io/projected/79c7ec90-6869-4eed-9ced-6ba0adfe7965-kube-api-access-qnnth\") pod \"79c7ec90-6869-4eed-9ced-6ba0adfe7965\" (UID: \"79c7ec90-6869-4eed-9ced-6ba0adfe7965\") "
Nov 25 09:24:50 crc kubenswrapper[4687]: I1125 09:24:50.873913 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79c7ec90-6869-4eed-9ced-6ba0adfe7965-kube-api-access-qnnth" (OuterVolumeSpecName: "kube-api-access-qnnth") pod "79c7ec90-6869-4eed-9ced-6ba0adfe7965" (UID: "79c7ec90-6869-4eed-9ced-6ba0adfe7965"). InnerVolumeSpecName "kube-api-access-qnnth". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:24:50 crc kubenswrapper[4687]: I1125 09:24:50.877017 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79c7ec90-6869-4eed-9ced-6ba0adfe7965-scripts" (OuterVolumeSpecName: "scripts") pod "79c7ec90-6869-4eed-9ced-6ba0adfe7965" (UID: "79c7ec90-6869-4eed-9ced-6ba0adfe7965"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:24:50 crc kubenswrapper[4687]: I1125 09:24:50.901760 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79c7ec90-6869-4eed-9ced-6ba0adfe7965-config-data" (OuterVolumeSpecName: "config-data") pod "79c7ec90-6869-4eed-9ced-6ba0adfe7965" (UID: "79c7ec90-6869-4eed-9ced-6ba0adfe7965"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:24:50 crc kubenswrapper[4687]: I1125 09:24:50.912780 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79c7ec90-6869-4eed-9ced-6ba0adfe7965-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "79c7ec90-6869-4eed-9ced-6ba0adfe7965" (UID: "79c7ec90-6869-4eed-9ced-6ba0adfe7965"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
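
This unmount sequence mirrors the VerifyControllerAttachedVolume/MountVolume sequence that set the same four volumes up at 09:24:29: the kubelet's volume manager keeps reconciling a desired state of world against an actual state of world, so once the db-sync job pod is gone its volumes are torn down and subsequently reported detached. A minimal sketch of that reconcile pattern, with deliberately simplified, hypothetical types (the real kubelet runs these operations asynchronously through an operation executor, which is why each step logs once when started and once when it succeeds):

package main

import "fmt"

// world is an illustrative stand-in for the kubelet's desired/actual
// "state of world" caches: volume name -> currently present.
type world map[string]bool

// reconcile mounts anything desired but absent and unmounts anything
// present but no longer desired, echoing the log lines above.
func reconcile(desired, actual world) {
	for v := range desired {
		if !actual[v] {
			fmt.Printf("operationExecutor.MountVolume started for volume %q\n", v)
			actual[v] = true
		}
	}
	for v := range actual {
		if !desired[v] {
			fmt.Printf("operationExecutor.UnmountVolume started for volume %q\n", v)
			delete(actual, v)
		}
	}
}

func main() {
	actual := world{"config-data": true, "scripts": true}
	reconcile(world{}, actual) // pod deleted: everything gets unmounted
}
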
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:24:50 crc kubenswrapper[4687]: I1125 09:24:50.965771 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79c7ec90-6869-4eed-9ced-6ba0adfe7965-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:50 crc kubenswrapper[4687]: I1125 09:24:50.966233 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qnnth\" (UniqueName: \"kubernetes.io/projected/79c7ec90-6869-4eed-9ced-6ba0adfe7965-kube-api-access-qnnth\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:50 crc kubenswrapper[4687]: I1125 09:24:50.966307 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79c7ec90-6869-4eed-9ced-6ba0adfe7965-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:50 crc kubenswrapper[4687]: I1125 09:24:50.966375 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79c7ec90-6869-4eed-9ced-6ba0adfe7965-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:51 crc kubenswrapper[4687]: E1125 09:24:51.173951 4687 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf3d562b_dfcc_4c3c_b5f8_ff7f3e284457.slice/crio-conmon-431647bb5a94ba83ebdb5334e430fcba71f052f2cd2c2ec3a9db257c615b9701.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf3d562b_dfcc_4c3c_b5f8_ff7f3e284457.slice/crio-431647bb5a94ba83ebdb5334e430fcba71f052f2cd2c2ec3a9db257c615b9701.scope\": RecentStats: unable to find data in memory cache]" Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.408451 4687 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.408382 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-wm5bb" event={"ID":"79c7ec90-6869-4eed-9ced-6ba0adfe7965","Type":"ContainerDied","Data":"2e70d295acbc931f53ec0d904518bc5ab4908ed0607fd45c36bdd7f5dc43dddb"}
Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.409887 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e70d295acbc931f53ec0d904518bc5ab4908ed0607fd45c36bdd7f5dc43dddb"
Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.411853 4687 generic.go:334] "Generic (PLEG): container finished" podID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" containerID="431647bb5a94ba83ebdb5334e430fcba71f052f2cd2c2ec3a9db257c615b9701" exitCode=137
Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.411887 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457","Type":"ContainerDied","Data":"431647bb5a94ba83ebdb5334e430fcba71f052f2cd2c2ec3a9db257c615b9701"}
Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.581144 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 25 09:24:51 crc kubenswrapper[4687]: E1125 09:24:51.582722 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79c7ec90-6869-4eed-9ced-6ba0adfe7965" containerName="nova-cell0-conductor-db-sync"
Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.582804 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="79c7ec90-6869-4eed-9ced-6ba0adfe7965" containerName="nova-cell0-conductor-db-sync"
Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.583104 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="79c7ec90-6869-4eed-9ced-6ba0adfe7965" containerName="nova-cell0-conductor-db-sync"
Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.586251 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
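
Two details worth decoding in this burst. First, exitCode=137 for the ceilometer proxy-httpd container is 128+9, i.e. the runtime escalated to SIGKILL while the pod was being deleted; compare exitCode=0 at 09:24:49 for the db-sync job container, which completed normally. Second, the cpu_manager/memory_manager "RemoveStaleState" lines show the resource managers dropping their checkpointed assignments for containers that no longer exist before the new nova-cell0-conductor-0 pod is admitted. The shell-style exit-status decoding:

package main

import "fmt"

// A process killed by signal N conventionally reports exit code 128+N,
// so 137 => SIGKILL (9), while 0 is a clean exit.
func decode(exitCode int) string {
	if exitCode > 128 {
		return fmt.Sprintf("killed by signal %d", exitCode-128)
	}
	return fmt.Sprintf("exited normally with status %d", exitCode)
}

func main() {
	fmt.Println(decode(137)) // killed by signal 9 (SIGKILL)
	fmt.Println(decode(0))   // exited normally with status 0
}
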
Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.591338 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-c7224"
Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.591765 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.602125 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.677681 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e891aa7c-cb45-432d-9a15-1194e9700272-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"e891aa7c-cb45-432d-9a15-1194e9700272\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.677838 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vtt75\" (UniqueName: \"kubernetes.io/projected/e891aa7c-cb45-432d-9a15-1194e9700272-kube-api-access-vtt75\") pod \"nova-cell0-conductor-0\" (UID: \"e891aa7c-cb45-432d-9a15-1194e9700272\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.677889 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e891aa7c-cb45-432d-9a15-1194e9700272-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"e891aa7c-cb45-432d-9a15-1194e9700272\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.778733 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vtt75\" (UniqueName: \"kubernetes.io/projected/e891aa7c-cb45-432d-9a15-1194e9700272-kube-api-access-vtt75\") pod \"nova-cell0-conductor-0\" (UID: \"e891aa7c-cb45-432d-9a15-1194e9700272\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.778803 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e891aa7c-cb45-432d-9a15-1194e9700272-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"e891aa7c-cb45-432d-9a15-1194e9700272\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.778845 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e891aa7c-cb45-432d-9a15-1194e9700272-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"e891aa7c-cb45-432d-9a15-1194e9700272\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.784577 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e891aa7c-cb45-432d-9a15-1194e9700272-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"e891aa7c-cb45-432d-9a15-1194e9700272\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.784722 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.789264 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e891aa7c-cb45-432d-9a15-1194e9700272-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"e891aa7c-cb45-432d-9a15-1194e9700272\") " pod="openstack/nova-cell0-conductor-0" Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.798443 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vtt75\" (UniqueName: \"kubernetes.io/projected/e891aa7c-cb45-432d-9a15-1194e9700272-kube-api-access-vtt75\") pod \"nova-cell0-conductor-0\" (UID: \"e891aa7c-cb45-432d-9a15-1194e9700272\") " pod="openstack/nova-cell0-conductor-0" Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.879873 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-config-data\") pod \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.880003 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-combined-ca-bundle\") pod \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.880047 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-sg-core-conf-yaml\") pod \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.880087 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-run-httpd\") pod \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.880159 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-scripts\") pod \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.880190 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-log-httpd\") pod \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.880240 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hgbf\" (UniqueName: \"kubernetes.io/projected/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-kube-api-access-5hgbf\") pod \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\" (UID: \"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457\") " Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.881684 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" (UID: 
"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.882063 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" (UID: "af3d562b-dfcc-4c3c-b5f8-ff7f3e284457"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.885002 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-kube-api-access-5hgbf" (OuterVolumeSpecName: "kube-api-access-5hgbf") pod "af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" (UID: "af3d562b-dfcc-4c3c-b5f8-ff7f3e284457"). InnerVolumeSpecName "kube-api-access-5hgbf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.885348 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-scripts" (OuterVolumeSpecName: "scripts") pod "af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" (UID: "af3d562b-dfcc-4c3c-b5f8-ff7f3e284457"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.905310 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" (UID: "af3d562b-dfcc-4c3c-b5f8-ff7f3e284457"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.915746 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.952677 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" (UID: "af3d562b-dfcc-4c3c-b5f8-ff7f3e284457"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.972696 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-config-data" (OuterVolumeSpecName: "config-data") pod "af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" (UID: "af3d562b-dfcc-4c3c-b5f8-ff7f3e284457"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.982626 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.982656 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.982668 4687 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.982677 4687 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.982685 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.982693 4687 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:51 crc kubenswrapper[4687]: I1125 09:24:51.982702 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hgbf\" (UniqueName: \"kubernetes.io/projected/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457-kube-api-access-5hgbf\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.376699 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 09:24:52 crc kubenswrapper[4687]: W1125 09:24:52.378151 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode891aa7c_cb45_432d_9a15_1194e9700272.slice/crio-a4b846cd25eb269743ddba7dfb31a341657fe627cca19fd29180e17b99321cba WatchSource:0}: Error finding container a4b846cd25eb269743ddba7dfb31a341657fe627cca19fd29180e17b99321cba: Status 404 returned error can't find the container with id a4b846cd25eb269743ddba7dfb31a341657fe627cca19fd29180e17b99321cba Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.426740 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"af3d562b-dfcc-4c3c-b5f8-ff7f3e284457","Type":"ContainerDied","Data":"3395d24a6abf322703546fe6d39413c0b6478420c075346199c15fbfb57239c3"} Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.426792 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.426823 4687 scope.go:117] "RemoveContainer" containerID="431647bb5a94ba83ebdb5334e430fcba71f052f2cd2c2ec3a9db257c615b9701" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.428223 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"e891aa7c-cb45-432d-9a15-1194e9700272","Type":"ContainerStarted","Data":"a4b846cd25eb269743ddba7dfb31a341657fe627cca19fd29180e17b99321cba"} Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.454353 4687 scope.go:117] "RemoveContainer" containerID="ea8bd60d99f129b660274b1b6ebddd7d7adfbe698613324272937802a50fd44d" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.465203 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.478567 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.491598 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:24:52 crc kubenswrapper[4687]: E1125 09:24:52.492025 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" containerName="proxy-httpd" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.492050 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" containerName="proxy-httpd" Nov 25 09:24:52 crc kubenswrapper[4687]: E1125 09:24:52.492072 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" containerName="ceilometer-central-agent" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.492081 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" containerName="ceilometer-central-agent" Nov 25 09:24:52 crc kubenswrapper[4687]: E1125 09:24:52.492093 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" containerName="sg-core" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.492101 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" containerName="sg-core" Nov 25 09:24:52 crc kubenswrapper[4687]: E1125 09:24:52.492133 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" containerName="ceilometer-notification-agent" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.492141 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" containerName="ceilometer-notification-agent" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.492349 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" containerName="ceilometer-central-agent" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.492377 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" containerName="ceilometer-notification-agent" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.492393 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" containerName="sg-core" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.492409 4687 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" containerName="proxy-httpd" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.494378 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.496588 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.496806 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.496916 4687 scope.go:117] "RemoveContainer" containerID="20c8d7c53e8c1243047227417a10d2abc6e193b912587212062828db3b95d181" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.522407 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.534482 4687 scope.go:117] "RemoveContainer" containerID="672bc54fd6e8d9355659ba7cc4dfb76c01599310bb6adf00d8db903072dce0c8" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.594096 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/88fce615-5e48-4041-b9b3-409bf6a29166-run-httpd\") pod \"ceilometer-0\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.594225 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4z5vb\" (UniqueName: \"kubernetes.io/projected/88fce615-5e48-4041-b9b3-409bf6a29166-kube-api-access-4z5vb\") pod \"ceilometer-0\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.594284 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.594392 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-config-data\") pod \"ceilometer-0\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.594431 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/88fce615-5e48-4041-b9b3-409bf6a29166-log-httpd\") pod \"ceilometer-0\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.594473 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.594583 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-scripts\") pod \"ceilometer-0\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.696641 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-config-data\") pod \"ceilometer-0\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.696700 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/88fce615-5e48-4041-b9b3-409bf6a29166-log-httpd\") pod \"ceilometer-0\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.696756 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.696872 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-scripts\") pod \"ceilometer-0\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.696909 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/88fce615-5e48-4041-b9b3-409bf6a29166-run-httpd\") pod \"ceilometer-0\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.697542 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/88fce615-5e48-4041-b9b3-409bf6a29166-log-httpd\") pod \"ceilometer-0\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.697944 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/88fce615-5e48-4041-b9b3-409bf6a29166-run-httpd\") pod \"ceilometer-0\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.698126 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4z5vb\" (UniqueName: \"kubernetes.io/projected/88fce615-5e48-4041-b9b3-409bf6a29166-kube-api-access-4z5vb\") pod \"ceilometer-0\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.698155 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.703264 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.703688 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.703714 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-scripts\") pod \"ceilometer-0\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.704098 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-config-data\") pod \"ceilometer-0\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.724433 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4z5vb\" (UniqueName: \"kubernetes.io/projected/88fce615-5e48-4041-b9b3-409bf6a29166-kube-api-access-4z5vb\") pod \"ceilometer-0\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " pod="openstack/ceilometer-0" Nov 25 09:24:52 crc kubenswrapper[4687]: I1125 09:24:52.831048 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:24:53 crc kubenswrapper[4687]: I1125 09:24:53.338464 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:24:53 crc kubenswrapper[4687]: I1125 09:24:53.446410 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"e891aa7c-cb45-432d-9a15-1194e9700272","Type":"ContainerStarted","Data":"6794c106ace2b99d887a2d5ea1cd464c852d2be81547e9f75092d10dad8db70b"} Nov 25 09:24:53 crc kubenswrapper[4687]: I1125 09:24:53.448147 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 25 09:24:53 crc kubenswrapper[4687]: I1125 09:24:53.450035 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"88fce615-5e48-4041-b9b3-409bf6a29166","Type":"ContainerStarted","Data":"26fad4ea2cbcf1d64323df8227206fa2cb699cb4994cc904dc5716976b65343c"} Nov 25 09:24:53 crc kubenswrapper[4687]: I1125 09:24:53.466611 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.466592449 podStartE2EDuration="2.466592449s" podCreationTimestamp="2025-11-25 09:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:24:53.460254577 +0000 UTC m=+1288.513894295" watchObservedRunningTime="2025-11-25 09:24:53.466592449 +0000 UTC m=+1288.520232167" Nov 25 09:24:53 crc kubenswrapper[4687]: I1125 09:24:53.756209 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af3d562b-dfcc-4c3c-b5f8-ff7f3e284457" path="/var/lib/kubelet/pods/af3d562b-dfcc-4c3c-b5f8-ff7f3e284457/volumes" Nov 25 09:24:54 crc kubenswrapper[4687]: I1125 09:24:54.463900 4687 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"88fce615-5e48-4041-b9b3-409bf6a29166","Type":"ContainerStarted","Data":"f6289cd9775038ba529d1b864f27176b6c934b0cf7617233a23f14d9df43048b"} Nov 25 09:24:55 crc kubenswrapper[4687]: I1125 09:24:55.476175 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"88fce615-5e48-4041-b9b3-409bf6a29166","Type":"ContainerStarted","Data":"c436591ce9a835b093718a535f6cb9e6c2388018bddcde9122c79b2d950b52b2"} Nov 25 09:24:55 crc kubenswrapper[4687]: I1125 09:24:55.476846 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"88fce615-5e48-4041-b9b3-409bf6a29166","Type":"ContainerStarted","Data":"c900ffadeb11cde8958d08f0543cb428a3a85902d5e6b33a329f0d018c714155"} Nov 25 09:24:57 crc kubenswrapper[4687]: I1125 09:24:57.494133 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"88fce615-5e48-4041-b9b3-409bf6a29166","Type":"ContainerStarted","Data":"8d2038133110eacd56fe1fc055ab94cfeb15af8891520ec151fc2c897c7eda39"} Nov 25 09:24:57 crc kubenswrapper[4687]: I1125 09:24:57.494898 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 09:25:01 crc kubenswrapper[4687]: I1125 09:25:01.954728 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 25 09:25:01 crc kubenswrapper[4687]: I1125 09:25:01.982218 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=6.841475264 podStartE2EDuration="9.982195292s" podCreationTimestamp="2025-11-25 09:24:52 +0000 UTC" firstStartedPulling="2025-11-25 09:24:53.343648646 +0000 UTC m=+1288.397288364" lastFinishedPulling="2025-11-25 09:24:56.484368674 +0000 UTC m=+1291.538008392" observedRunningTime="2025-11-25 09:24:57.525423753 +0000 UTC m=+1292.579063511" watchObservedRunningTime="2025-11-25 09:25:01.982195292 +0000 UTC m=+1297.035835020" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.477767 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-fp7dc"] Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.478865 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-fp7dc" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.482470 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.482470 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.497348 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-fp7dc"] Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.499879 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ced595e-30dd-4300-b8b6-df549003f298-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-fp7dc\" (UID: \"7ced595e-30dd-4300-b8b6-df549003f298\") " pod="openstack/nova-cell0-cell-mapping-fp7dc" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.499929 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ced595e-30dd-4300-b8b6-df549003f298-config-data\") pod \"nova-cell0-cell-mapping-fp7dc\" (UID: \"7ced595e-30dd-4300-b8b6-df549003f298\") " pod="openstack/nova-cell0-cell-mapping-fp7dc" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.500198 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcvnq\" (UniqueName: \"kubernetes.io/projected/7ced595e-30dd-4300-b8b6-df549003f298-kube-api-access-xcvnq\") pod \"nova-cell0-cell-mapping-fp7dc\" (UID: \"7ced595e-30dd-4300-b8b6-df549003f298\") " pod="openstack/nova-cell0-cell-mapping-fp7dc" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.500242 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ced595e-30dd-4300-b8b6-df549003f298-scripts\") pod \"nova-cell0-cell-mapping-fp7dc\" (UID: \"7ced595e-30dd-4300-b8b6-df549003f298\") " pod="openstack/nova-cell0-cell-mapping-fp7dc" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.602070 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcvnq\" (UniqueName: \"kubernetes.io/projected/7ced595e-30dd-4300-b8b6-df549003f298-kube-api-access-xcvnq\") pod \"nova-cell0-cell-mapping-fp7dc\" (UID: \"7ced595e-30dd-4300-b8b6-df549003f298\") " pod="openstack/nova-cell0-cell-mapping-fp7dc" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.602123 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ced595e-30dd-4300-b8b6-df549003f298-scripts\") pod \"nova-cell0-cell-mapping-fp7dc\" (UID: \"7ced595e-30dd-4300-b8b6-df549003f298\") " pod="openstack/nova-cell0-cell-mapping-fp7dc" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.602194 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ced595e-30dd-4300-b8b6-df549003f298-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-fp7dc\" (UID: \"7ced595e-30dd-4300-b8b6-df549003f298\") " pod="openstack/nova-cell0-cell-mapping-fp7dc" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.602218 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/7ced595e-30dd-4300-b8b6-df549003f298-config-data\") pod \"nova-cell0-cell-mapping-fp7dc\" (UID: \"7ced595e-30dd-4300-b8b6-df549003f298\") " pod="openstack/nova-cell0-cell-mapping-fp7dc" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.608684 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ced595e-30dd-4300-b8b6-df549003f298-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-fp7dc\" (UID: \"7ced595e-30dd-4300-b8b6-df549003f298\") " pod="openstack/nova-cell0-cell-mapping-fp7dc" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.620813 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ced595e-30dd-4300-b8b6-df549003f298-scripts\") pod \"nova-cell0-cell-mapping-fp7dc\" (UID: \"7ced595e-30dd-4300-b8b6-df549003f298\") " pod="openstack/nova-cell0-cell-mapping-fp7dc" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.621276 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcvnq\" (UniqueName: \"kubernetes.io/projected/7ced595e-30dd-4300-b8b6-df549003f298-kube-api-access-xcvnq\") pod \"nova-cell0-cell-mapping-fp7dc\" (UID: \"7ced595e-30dd-4300-b8b6-df549003f298\") " pod="openstack/nova-cell0-cell-mapping-fp7dc" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.621406 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ced595e-30dd-4300-b8b6-df549003f298-config-data\") pod \"nova-cell0-cell-mapping-fp7dc\" (UID: \"7ced595e-30dd-4300-b8b6-df549003f298\") " pod="openstack/nova-cell0-cell-mapping-fp7dc" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.678826 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.681205 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.688621 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.699761 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.709747 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8cb72006-ba6e-4b06-9033-7ffb99c38416-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8cb72006-ba6e-4b06-9033-7ffb99c38416\") " pod="openstack/nova-api-0" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.709795 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8cb72006-ba6e-4b06-9033-7ffb99c38416-config-data\") pod \"nova-api-0\" (UID: \"8cb72006-ba6e-4b06-9033-7ffb99c38416\") " pod="openstack/nova-api-0" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.709827 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8cb72006-ba6e-4b06-9033-7ffb99c38416-logs\") pod \"nova-api-0\" (UID: \"8cb72006-ba6e-4b06-9033-7ffb99c38416\") " pod="openstack/nova-api-0" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.709875 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jg84d\" (UniqueName: \"kubernetes.io/projected/8cb72006-ba6e-4b06-9033-7ffb99c38416-kube-api-access-jg84d\") pod \"nova-api-0\" (UID: \"8cb72006-ba6e-4b06-9033-7ffb99c38416\") " pod="openstack/nova-api-0" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.775728 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.776939 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.788365 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.796337 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-fp7dc" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.797137 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.799957 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.811455 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.811844 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8cb72006-ba6e-4b06-9033-7ffb99c38416-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8cb72006-ba6e-4b06-9033-7ffb99c38416\") " pod="openstack/nova-api-0" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.811870 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8cb72006-ba6e-4b06-9033-7ffb99c38416-config-data\") pod \"nova-api-0\" (UID: \"8cb72006-ba6e-4b06-9033-7ffb99c38416\") " pod="openstack/nova-api-0" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.811897 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8cb72006-ba6e-4b06-9033-7ffb99c38416-logs\") pod \"nova-api-0\" (UID: \"8cb72006-ba6e-4b06-9033-7ffb99c38416\") " pod="openstack/nova-api-0" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.811938 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jg84d\" (UniqueName: \"kubernetes.io/projected/8cb72006-ba6e-4b06-9033-7ffb99c38416-kube-api-access-jg84d\") pod \"nova-api-0\" (UID: \"8cb72006-ba6e-4b06-9033-7ffb99c38416\") " pod="openstack/nova-api-0" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.813385 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8cb72006-ba6e-4b06-9033-7ffb99c38416-logs\") pod \"nova-api-0\" (UID: \"8cb72006-ba6e-4b06-9033-7ffb99c38416\") " pod="openstack/nova-api-0" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.826400 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8cb72006-ba6e-4b06-9033-7ffb99c38416-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8cb72006-ba6e-4b06-9033-7ffb99c38416\") " pod="openstack/nova-api-0" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.827058 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8cb72006-ba6e-4b06-9033-7ffb99c38416-config-data\") pod \"nova-api-0\" (UID: \"8cb72006-ba6e-4b06-9033-7ffb99c38416\") " pod="openstack/nova-api-0" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.838687 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.859340 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.904629 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jg84d\" (UniqueName: \"kubernetes.io/projected/8cb72006-ba6e-4b06-9033-7ffb99c38416-kube-api-access-jg84d\") pod \"nova-api-0\" (UID: \"8cb72006-ba6e-4b06-9033-7ffb99c38416\") " pod="openstack/nova-api-0" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.914581 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/8e42ba40-1de7-4098-84a1-fe6673aedecf-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"8e42ba40-1de7-4098-84a1-fe6673aedecf\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.914652 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sshwx\" (UniqueName: \"kubernetes.io/projected/083a6ba3-a4ab-4f50-948d-bb7e02c0f886-kube-api-access-sshwx\") pod \"nova-scheduler-0\" (UID: \"083a6ba3-a4ab-4f50-948d-bb7e02c0f886\") " pod="openstack/nova-scheduler-0" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.914682 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/083a6ba3-a4ab-4f50-948d-bb7e02c0f886-config-data\") pod \"nova-scheduler-0\" (UID: \"083a6ba3-a4ab-4f50-948d-bb7e02c0f886\") " pod="openstack/nova-scheduler-0" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.914736 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/083a6ba3-a4ab-4f50-948d-bb7e02c0f886-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"083a6ba3-a4ab-4f50-948d-bb7e02c0f886\") " pod="openstack/nova-scheduler-0" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.914767 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e42ba40-1de7-4098-84a1-fe6673aedecf-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"8e42ba40-1de7-4098-84a1-fe6673aedecf\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:02 crc kubenswrapper[4687]: I1125 09:25:02.914816 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-545zc\" (UniqueName: \"kubernetes.io/projected/8e42ba40-1de7-4098-84a1-fe6673aedecf-kube-api-access-545zc\") pod \"nova-cell1-novncproxy-0\" (UID: \"8e42ba40-1de7-4098-84a1-fe6673aedecf\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.016604 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sshwx\" (UniqueName: \"kubernetes.io/projected/083a6ba3-a4ab-4f50-948d-bb7e02c0f886-kube-api-access-sshwx\") pod \"nova-scheduler-0\" (UID: \"083a6ba3-a4ab-4f50-948d-bb7e02c0f886\") " pod="openstack/nova-scheduler-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.016984 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/083a6ba3-a4ab-4f50-948d-bb7e02c0f886-config-data\") pod \"nova-scheduler-0\" (UID: \"083a6ba3-a4ab-4f50-948d-bb7e02c0f886\") " pod="openstack/nova-scheduler-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.017736 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/083a6ba3-a4ab-4f50-948d-bb7e02c0f886-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"083a6ba3-a4ab-4f50-948d-bb7e02c0f886\") " pod="openstack/nova-scheduler-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.017779 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e42ba40-1de7-4098-84a1-fe6673aedecf-combined-ca-bundle\") pod 
\"nova-cell1-novncproxy-0\" (UID: \"8e42ba40-1de7-4098-84a1-fe6673aedecf\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.017865 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-545zc\" (UniqueName: \"kubernetes.io/projected/8e42ba40-1de7-4098-84a1-fe6673aedecf-kube-api-access-545zc\") pod \"nova-cell1-novncproxy-0\" (UID: \"8e42ba40-1de7-4098-84a1-fe6673aedecf\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.018092 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e42ba40-1de7-4098-84a1-fe6673aedecf-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"8e42ba40-1de7-4098-84a1-fe6673aedecf\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.036382 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/083a6ba3-a4ab-4f50-948d-bb7e02c0f886-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"083a6ba3-a4ab-4f50-948d-bb7e02c0f886\") " pod="openstack/nova-scheduler-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.037550 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/083a6ba3-a4ab-4f50-948d-bb7e02c0f886-config-data\") pod \"nova-scheduler-0\" (UID: \"083a6ba3-a4ab-4f50-948d-bb7e02c0f886\") " pod="openstack/nova-scheduler-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.045903 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e42ba40-1de7-4098-84a1-fe6673aedecf-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"8e42ba40-1de7-4098-84a1-fe6673aedecf\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.051521 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.051766 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e42ba40-1de7-4098-84a1-fe6673aedecf-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"8e42ba40-1de7-4098-84a1-fe6673aedecf\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.052989 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.066128 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.078336 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.098294 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sshwx\" (UniqueName: \"kubernetes.io/projected/083a6ba3-a4ab-4f50-948d-bb7e02c0f886-kube-api-access-sshwx\") pod \"nova-scheduler-0\" (UID: \"083a6ba3-a4ab-4f50-948d-bb7e02c0f886\") " pod="openstack/nova-scheduler-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.113328 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.113525 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.115956 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-545zc\" (UniqueName: \"kubernetes.io/projected/8e42ba40-1de7-4098-84a1-fe6673aedecf-kube-api-access-545zc\") pod \"nova-cell1-novncproxy-0\" (UID: \"8e42ba40-1de7-4098-84a1-fe6673aedecf\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.199623 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-67tr7"] Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.201363 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.223192 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-67tr7"] Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.224181 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b34621cb-726c-4a5a-9f6b-62108919b986-config-data\") pod \"nova-metadata-0\" (UID: \"b34621cb-726c-4a5a-9f6b-62108919b986\") " pod="openstack/nova-metadata-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.224228 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b34621cb-726c-4a5a-9f6b-62108919b986-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b34621cb-726c-4a5a-9f6b-62108919b986\") " pod="openstack/nova-metadata-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.224320 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b34621cb-726c-4a5a-9f6b-62108919b986-logs\") pod \"nova-metadata-0\" (UID: \"b34621cb-726c-4a5a-9f6b-62108919b986\") " pod="openstack/nova-metadata-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.224369 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96jzk\" (UniqueName: \"kubernetes.io/projected/b34621cb-726c-4a5a-9f6b-62108919b986-kube-api-access-96jzk\") pod \"nova-metadata-0\" (UID: \"b34621cb-726c-4a5a-9f6b-62108919b986\") " pod="openstack/nova-metadata-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.276769 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.326181 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b34621cb-726c-4a5a-9f6b-62108919b986-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b34621cb-726c-4a5a-9f6b-62108919b986\") " pod="openstack/nova-metadata-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.326240 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-67tr7\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.326292 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-dns-svc\") pod \"dnsmasq-dns-bccf8f775-67tr7\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.326315 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-config\") pod \"dnsmasq-dns-bccf8f775-67tr7\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.326338 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b34621cb-726c-4a5a-9f6b-62108919b986-logs\") pod \"nova-metadata-0\" (UID: \"b34621cb-726c-4a5a-9f6b-62108919b986\") " pod="openstack/nova-metadata-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.326353 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-67tr7\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.326381 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5m7z\" (UniqueName: \"kubernetes.io/projected/dbf632e8-4b43-4ff2-991b-94111453b58b-kube-api-access-g5m7z\") pod \"dnsmasq-dns-bccf8f775-67tr7\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.326418 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96jzk\" (UniqueName: \"kubernetes.io/projected/b34621cb-726c-4a5a-9f6b-62108919b986-kube-api-access-96jzk\") pod \"nova-metadata-0\" (UID: \"b34621cb-726c-4a5a-9f6b-62108919b986\") " pod="openstack/nova-metadata-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.326436 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-67tr7\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " 
pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.326478 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b34621cb-726c-4a5a-9f6b-62108919b986-config-data\") pod \"nova-metadata-0\" (UID: \"b34621cb-726c-4a5a-9f6b-62108919b986\") " pod="openstack/nova-metadata-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.327625 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b34621cb-726c-4a5a-9f6b-62108919b986-logs\") pod \"nova-metadata-0\" (UID: \"b34621cb-726c-4a5a-9f6b-62108919b986\") " pod="openstack/nova-metadata-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.345830 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b34621cb-726c-4a5a-9f6b-62108919b986-config-data\") pod \"nova-metadata-0\" (UID: \"b34621cb-726c-4a5a-9f6b-62108919b986\") " pod="openstack/nova-metadata-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.346393 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b34621cb-726c-4a5a-9f6b-62108919b986-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b34621cb-726c-4a5a-9f6b-62108919b986\") " pod="openstack/nova-metadata-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.352118 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96jzk\" (UniqueName: \"kubernetes.io/projected/b34621cb-726c-4a5a-9f6b-62108919b986-kube-api-access-96jzk\") pod \"nova-metadata-0\" (UID: \"b34621cb-726c-4a5a-9f6b-62108919b986\") " pod="openstack/nova-metadata-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.405004 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.428323 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-67tr7\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.428400 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-dns-svc\") pod \"dnsmasq-dns-bccf8f775-67tr7\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.428431 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-config\") pod \"dnsmasq-dns-bccf8f775-67tr7\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.428454 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-67tr7\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.428482 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5m7z\" (UniqueName: \"kubernetes.io/projected/dbf632e8-4b43-4ff2-991b-94111453b58b-kube-api-access-g5m7z\") pod \"dnsmasq-dns-bccf8f775-67tr7\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.429761 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-config\") pod \"dnsmasq-dns-bccf8f775-67tr7\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.430349 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-67tr7\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.430979 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-dns-svc\") pod \"dnsmasq-dns-bccf8f775-67tr7\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.431000 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-67tr7\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.432058 4687 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-67tr7\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.428537 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-67tr7\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.467237 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5m7z\" (UniqueName: \"kubernetes.io/projected/dbf632e8-4b43-4ff2-991b-94111453b58b-kube-api-access-g5m7z\") pod \"dnsmasq-dns-bccf8f775-67tr7\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.527029 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.726709 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-fp7dc"] Nov 25 09:25:03 crc kubenswrapper[4687]: W1125 09:25:03.751916 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ced595e_30dd_4300_b8b6_df549003f298.slice/crio-07630f9b3189a6994cc37af8b03e917c327d30831e0b107c944420a158b4ad21 WatchSource:0}: Error finding container 07630f9b3189a6994cc37af8b03e917c327d30831e0b107c944420a158b4ad21: Status 404 returned error can't find the container with id 07630f9b3189a6994cc37af8b03e917c327d30831e0b107c944420a158b4ad21 Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.772957 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.915809 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.972944 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-6nqsr"] Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.974218 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-6nqsr" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.976974 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.977125 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 25 09:25:03 crc kubenswrapper[4687]: I1125 09:25:03.989295 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-6nqsr"] Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.107550 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 09:25:04 crc kubenswrapper[4687]: W1125 09:25:04.113116 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e42ba40_1de7_4098_84a1_fe6673aedecf.slice/crio-be2f5b3664d86844373272da0c889dd8e26ce7c914bd07d5997c0ba8eb105515 WatchSource:0}: Error finding container be2f5b3664d86844373272da0c889dd8e26ce7c914bd07d5997c0ba8eb105515: Status 404 returned error can't find the container with id be2f5b3664d86844373272da0c889dd8e26ce7c914bd07d5997c0ba8eb105515 Nov 25 09:25:04 crc kubenswrapper[4687]: W1125 09:25:04.115157 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb34621cb_726c_4a5a_9f6b_62108919b986.slice/crio-6395f000bacf6d90a6426625c5c41c5e2c8d834dc92d7ee07b680e73550ef71e WatchSource:0}: Error finding container 6395f000bacf6d90a6426625c5c41c5e2c8d834dc92d7ee07b680e73550ef71e: Status 404 returned error can't find the container with id 6395f000bacf6d90a6426625c5c41c5e2c8d834dc92d7ee07b680e73550ef71e Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.124304 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.160063 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-config-data\") pod \"nova-cell1-conductor-db-sync-6nqsr\" (UID: \"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a\") " pod="openstack/nova-cell1-conductor-db-sync-6nqsr" Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.160137 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-scripts\") pod \"nova-cell1-conductor-db-sync-6nqsr\" (UID: \"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a\") " pod="openstack/nova-cell1-conductor-db-sync-6nqsr" Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.160183 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-6nqsr\" (UID: \"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a\") " pod="openstack/nova-cell1-conductor-db-sync-6nqsr" Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.160244 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-298g8\" (UniqueName: \"kubernetes.io/projected/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-kube-api-access-298g8\") pod 
\"nova-cell1-conductor-db-sync-6nqsr\" (UID: \"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a\") " pod="openstack/nova-cell1-conductor-db-sync-6nqsr" Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.275293 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-298g8\" (UniqueName: \"kubernetes.io/projected/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-kube-api-access-298g8\") pod \"nova-cell1-conductor-db-sync-6nqsr\" (UID: \"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a\") " pod="openstack/nova-cell1-conductor-db-sync-6nqsr" Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.275603 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-config-data\") pod \"nova-cell1-conductor-db-sync-6nqsr\" (UID: \"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a\") " pod="openstack/nova-cell1-conductor-db-sync-6nqsr" Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.275681 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-scripts\") pod \"nova-cell1-conductor-db-sync-6nqsr\" (UID: \"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a\") " pod="openstack/nova-cell1-conductor-db-sync-6nqsr" Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.275713 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-6nqsr\" (UID: \"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a\") " pod="openstack/nova-cell1-conductor-db-sync-6nqsr" Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.276809 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-67tr7"] Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.281290 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-config-data\") pod \"nova-cell1-conductor-db-sync-6nqsr\" (UID: \"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a\") " pod="openstack/nova-cell1-conductor-db-sync-6nqsr" Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.281369 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-scripts\") pod \"nova-cell1-conductor-db-sync-6nqsr\" (UID: \"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a\") " pod="openstack/nova-cell1-conductor-db-sync-6nqsr" Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.290075 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-6nqsr\" (UID: \"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a\") " pod="openstack/nova-cell1-conductor-db-sync-6nqsr" Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.301761 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-298g8\" (UniqueName: \"kubernetes.io/projected/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-kube-api-access-298g8\") pod \"nova-cell1-conductor-db-sync-6nqsr\" (UID: \"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a\") " pod="openstack/nova-cell1-conductor-db-sync-6nqsr" Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.323623 4687 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-6nqsr" Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.610661 4687 generic.go:334] "Generic (PLEG): container finished" podID="dbf632e8-4b43-4ff2-991b-94111453b58b" containerID="8105cb68e02ead8876e028d055fe33bca39e940ab7c9f1bb316ffa57ed435646" exitCode=0 Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.610936 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-67tr7" event={"ID":"dbf632e8-4b43-4ff2-991b-94111453b58b","Type":"ContainerDied","Data":"8105cb68e02ead8876e028d055fe33bca39e940ab7c9f1bb316ffa57ed435646"} Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.610960 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-67tr7" event={"ID":"dbf632e8-4b43-4ff2-991b-94111453b58b","Type":"ContainerStarted","Data":"53831e7b61e4ef5681e7cd2e5f67929ad3a6b7e399596a6a806dab549aa15a5f"} Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.616290 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"8e42ba40-1de7-4098-84a1-fe6673aedecf","Type":"ContainerStarted","Data":"be2f5b3664d86844373272da0c889dd8e26ce7c914bd07d5997c0ba8eb105515"} Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.626904 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"083a6ba3-a4ab-4f50-948d-bb7e02c0f886","Type":"ContainerStarted","Data":"25a2991cc395a97295e9db6fcf9464b699f676cf0037a1e1ea91b1b8a014e692"} Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.628407 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8cb72006-ba6e-4b06-9033-7ffb99c38416","Type":"ContainerStarted","Data":"e2f5c1cdc673e4cd46acb06c47febcfca0192dfefbc2e8c022d23a8c67f3e691"} Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.634512 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-fp7dc" event={"ID":"7ced595e-30dd-4300-b8b6-df549003f298","Type":"ContainerStarted","Data":"36dcaced308de8a14b8e0fa94c3bc5987d0b6dea9eea2dd787cc9622583dd1d4"} Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.634568 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-fp7dc" event={"ID":"7ced595e-30dd-4300-b8b6-df549003f298","Type":"ContainerStarted","Data":"07630f9b3189a6994cc37af8b03e917c327d30831e0b107c944420a158b4ad21"} Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.655009 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b34621cb-726c-4a5a-9f6b-62108919b986","Type":"ContainerStarted","Data":"6395f000bacf6d90a6426625c5c41c5e2c8d834dc92d7ee07b680e73550ef71e"} Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.677450 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-fp7dc" podStartSLOduration=2.677427867 podStartE2EDuration="2.677427867s" podCreationTimestamp="2025-11-25 09:25:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:25:04.663829347 +0000 UTC m=+1299.717469065" watchObservedRunningTime="2025-11-25 09:25:04.677427867 +0000 UTC m=+1299.731067585" Nov 25 09:25:04 crc kubenswrapper[4687]: I1125 09:25:04.825528 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/nova-cell1-conductor-db-sync-6nqsr"] Nov 25 09:25:05 crc kubenswrapper[4687]: I1125 09:25:05.725170 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-67tr7" event={"ID":"dbf632e8-4b43-4ff2-991b-94111453b58b","Type":"ContainerStarted","Data":"e7b7434c73baa55375567695d5fd34ed47aa5a940b91285096fc2b8f59a7d22d"} Nov 25 09:25:05 crc kubenswrapper[4687]: I1125 09:25:05.726967 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:05 crc kubenswrapper[4687]: I1125 09:25:05.772590 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bccf8f775-67tr7" podStartSLOduration=2.772572722 podStartE2EDuration="2.772572722s" podCreationTimestamp="2025-11-25 09:25:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:25:05.756962786 +0000 UTC m=+1300.810602514" watchObservedRunningTime="2025-11-25 09:25:05.772572722 +0000 UTC m=+1300.826212440" Nov 25 09:25:05 crc kubenswrapper[4687]: I1125 09:25:05.806758 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-6nqsr" event={"ID":"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a","Type":"ContainerStarted","Data":"9072d01244da2515ab0e242e0a9776bf583d4df342452cd2c72d2a5c77ed4fff"} Nov 25 09:25:05 crc kubenswrapper[4687]: I1125 09:25:05.806801 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-6nqsr" event={"ID":"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a","Type":"ContainerStarted","Data":"6746e1344e6a935acc1e7b7ef7c4d5466b9271708ebee67bcb1f9cda3f89d215"} Nov 25 09:25:05 crc kubenswrapper[4687]: I1125 09:25:05.936205 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-6nqsr" podStartSLOduration=2.936178645 podStartE2EDuration="2.936178645s" podCreationTimestamp="2025-11-25 09:25:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:25:05.934723826 +0000 UTC m=+1300.988363544" watchObservedRunningTime="2025-11-25 09:25:05.936178645 +0000 UTC m=+1300.989818363" Nov 25 09:25:07 crc kubenswrapper[4687]: I1125 09:25:07.556015 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 09:25:07 crc kubenswrapper[4687]: I1125 09:25:07.564588 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:25:07 crc kubenswrapper[4687]: I1125 09:25:07.817052 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b34621cb-726c-4a5a-9f6b-62108919b986","Type":"ContainerStarted","Data":"7f034fa1d6eb6fe8e78881bf4cbb1790de71c407b6a0c54d848c957d71342632"} Nov 25 09:25:07 crc kubenswrapper[4687]: I1125 09:25:07.818633 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"8e42ba40-1de7-4098-84a1-fe6673aedecf","Type":"ContainerStarted","Data":"d2ba9b3228f4f599555b330eb247ddcc9a78ca0a7b1beb271726a9777a4c8a81"} Nov 25 09:25:07 crc kubenswrapper[4687]: I1125 09:25:07.818691 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="8e42ba40-1de7-4098-84a1-fe6673aedecf" containerName="nova-cell1-novncproxy-novncproxy" 
containerID="cri-o://d2ba9b3228f4f599555b330eb247ddcc9a78ca0a7b1beb271726a9777a4c8a81" gracePeriod=30 Nov 25 09:25:07 crc kubenswrapper[4687]: I1125 09:25:07.841873 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.687722698 podStartE2EDuration="5.841857181s" podCreationTimestamp="2025-11-25 09:25:02 +0000 UTC" firstStartedPulling="2025-11-25 09:25:04.11592899 +0000 UTC m=+1299.169568708" lastFinishedPulling="2025-11-25 09:25:07.270063473 +0000 UTC m=+1302.323703191" observedRunningTime="2025-11-25 09:25:07.840054273 +0000 UTC m=+1302.893693991" watchObservedRunningTime="2025-11-25 09:25:07.841857181 +0000 UTC m=+1302.895496899" Nov 25 09:25:08 crc kubenswrapper[4687]: I1125 09:25:08.277596 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:08 crc kubenswrapper[4687]: I1125 09:25:08.839491 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b34621cb-726c-4a5a-9f6b-62108919b986","Type":"ContainerStarted","Data":"f4b57dbc6e34beae984092a6b247cdb806451ca1af9d17407dc021277bda3d89"} Nov 25 09:25:08 crc kubenswrapper[4687]: I1125 09:25:08.839648 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b34621cb-726c-4a5a-9f6b-62108919b986" containerName="nova-metadata-log" containerID="cri-o://7f034fa1d6eb6fe8e78881bf4cbb1790de71c407b6a0c54d848c957d71342632" gracePeriod=30 Nov 25 09:25:08 crc kubenswrapper[4687]: I1125 09:25:08.839949 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b34621cb-726c-4a5a-9f6b-62108919b986" containerName="nova-metadata-metadata" containerID="cri-o://f4b57dbc6e34beae984092a6b247cdb806451ca1af9d17407dc021277bda3d89" gracePeriod=30 Nov 25 09:25:08 crc kubenswrapper[4687]: I1125 09:25:08.843169 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"083a6ba3-a4ab-4f50-948d-bb7e02c0f886","Type":"ContainerStarted","Data":"90e8b420983469556557a064231e1c06a2c7f15de027fdf75aa01a7de34e9fae"} Nov 25 09:25:08 crc kubenswrapper[4687]: I1125 09:25:08.848859 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8cb72006-ba6e-4b06-9033-7ffb99c38416","Type":"ContainerStarted","Data":"0efc4090de6c6bc00dfa67ad88d372225ad91483575a19d88bfa54152f9fed34"} Nov 25 09:25:08 crc kubenswrapper[4687]: I1125 09:25:08.848909 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8cb72006-ba6e-4b06-9033-7ffb99c38416","Type":"ContainerStarted","Data":"64d8f8d2d305a10c0d6578ff4fe5b91e62bed4882ebfcf1f85a1efb3abd9d760"} Nov 25 09:25:08 crc kubenswrapper[4687]: I1125 09:25:08.868739 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.716679349 podStartE2EDuration="6.868718795s" podCreationTimestamp="2025-11-25 09:25:02 +0000 UTC" firstStartedPulling="2025-11-25 09:25:04.118024977 +0000 UTC m=+1299.171664695" lastFinishedPulling="2025-11-25 09:25:07.270064423 +0000 UTC m=+1302.323704141" observedRunningTime="2025-11-25 09:25:08.863664076 +0000 UTC m=+1303.917303834" watchObservedRunningTime="2025-11-25 09:25:08.868718795 +0000 UTC m=+1303.922358523" Nov 25 09:25:08 crc kubenswrapper[4687]: I1125 09:25:08.893092 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/nova-api-0" podStartSLOduration=3.430896111 podStartE2EDuration="6.893072378s" podCreationTimestamp="2025-11-25 09:25:02 +0000 UTC" firstStartedPulling="2025-11-25 09:25:03.787024027 +0000 UTC m=+1298.840663745" lastFinishedPulling="2025-11-25 09:25:07.249200294 +0000 UTC m=+1302.302840012" observedRunningTime="2025-11-25 09:25:08.883773854 +0000 UTC m=+1303.937413592" watchObservedRunningTime="2025-11-25 09:25:08.893072378 +0000 UTC m=+1303.946712096" Nov 25 09:25:08 crc kubenswrapper[4687]: I1125 09:25:08.901048 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.578441097 podStartE2EDuration="6.901027896s" podCreationTimestamp="2025-11-25 09:25:02 +0000 UTC" firstStartedPulling="2025-11-25 09:25:03.926612635 +0000 UTC m=+1298.980252353" lastFinishedPulling="2025-11-25 09:25:07.249199444 +0000 UTC m=+1302.302839152" observedRunningTime="2025-11-25 09:25:08.898489406 +0000 UTC m=+1303.952129114" watchObservedRunningTime="2025-11-25 09:25:08.901027896 +0000 UTC m=+1303.954667614" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.469872 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.609880 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b34621cb-726c-4a5a-9f6b-62108919b986-combined-ca-bundle\") pod \"b34621cb-726c-4a5a-9f6b-62108919b986\" (UID: \"b34621cb-726c-4a5a-9f6b-62108919b986\") " Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.610031 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b34621cb-726c-4a5a-9f6b-62108919b986-logs\") pod \"b34621cb-726c-4a5a-9f6b-62108919b986\" (UID: \"b34621cb-726c-4a5a-9f6b-62108919b986\") " Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.610061 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-96jzk\" (UniqueName: \"kubernetes.io/projected/b34621cb-726c-4a5a-9f6b-62108919b986-kube-api-access-96jzk\") pod \"b34621cb-726c-4a5a-9f6b-62108919b986\" (UID: \"b34621cb-726c-4a5a-9f6b-62108919b986\") " Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.610135 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b34621cb-726c-4a5a-9f6b-62108919b986-config-data\") pod \"b34621cb-726c-4a5a-9f6b-62108919b986\" (UID: \"b34621cb-726c-4a5a-9f6b-62108919b986\") " Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.610796 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b34621cb-726c-4a5a-9f6b-62108919b986-logs" (OuterVolumeSpecName: "logs") pod "b34621cb-726c-4a5a-9f6b-62108919b986" (UID: "b34621cb-726c-4a5a-9f6b-62108919b986"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.611447 4687 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b34621cb-726c-4a5a-9f6b-62108919b986-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.618728 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b34621cb-726c-4a5a-9f6b-62108919b986-kube-api-access-96jzk" (OuterVolumeSpecName: "kube-api-access-96jzk") pod "b34621cb-726c-4a5a-9f6b-62108919b986" (UID: "b34621cb-726c-4a5a-9f6b-62108919b986"). InnerVolumeSpecName "kube-api-access-96jzk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.644655 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b34621cb-726c-4a5a-9f6b-62108919b986-config-data" (OuterVolumeSpecName: "config-data") pod "b34621cb-726c-4a5a-9f6b-62108919b986" (UID: "b34621cb-726c-4a5a-9f6b-62108919b986"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.652415 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b34621cb-726c-4a5a-9f6b-62108919b986-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b34621cb-726c-4a5a-9f6b-62108919b986" (UID: "b34621cb-726c-4a5a-9f6b-62108919b986"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.712735 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-96jzk\" (UniqueName: \"kubernetes.io/projected/b34621cb-726c-4a5a-9f6b-62108919b986-kube-api-access-96jzk\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.712773 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b34621cb-726c-4a5a-9f6b-62108919b986-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.712787 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b34621cb-726c-4a5a-9f6b-62108919b986-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.860084 4687 generic.go:334] "Generic (PLEG): container finished" podID="b34621cb-726c-4a5a-9f6b-62108919b986" containerID="f4b57dbc6e34beae984092a6b247cdb806451ca1af9d17407dc021277bda3d89" exitCode=0 Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.860120 4687 generic.go:334] "Generic (PLEG): container finished" podID="b34621cb-726c-4a5a-9f6b-62108919b986" containerID="7f034fa1d6eb6fe8e78881bf4cbb1790de71c407b6a0c54d848c957d71342632" exitCode=143 Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.860180 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.860174 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b34621cb-726c-4a5a-9f6b-62108919b986","Type":"ContainerDied","Data":"f4b57dbc6e34beae984092a6b247cdb806451ca1af9d17407dc021277bda3d89"} Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.860256 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b34621cb-726c-4a5a-9f6b-62108919b986","Type":"ContainerDied","Data":"7f034fa1d6eb6fe8e78881bf4cbb1790de71c407b6a0c54d848c957d71342632"} Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.860288 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b34621cb-726c-4a5a-9f6b-62108919b986","Type":"ContainerDied","Data":"6395f000bacf6d90a6426625c5c41c5e2c8d834dc92d7ee07b680e73550ef71e"} Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.860318 4687 scope.go:117] "RemoveContainer" containerID="f4b57dbc6e34beae984092a6b247cdb806451ca1af9d17407dc021277bda3d89" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.890071 4687 scope.go:117] "RemoveContainer" containerID="7f034fa1d6eb6fe8e78881bf4cbb1790de71c407b6a0c54d848c957d71342632" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.898165 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.913407 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.933921 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:25:09 crc kubenswrapper[4687]: E1125 09:25:09.934294 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b34621cb-726c-4a5a-9f6b-62108919b986" containerName="nova-metadata-metadata" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.934310 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="b34621cb-726c-4a5a-9f6b-62108919b986" containerName="nova-metadata-metadata" Nov 25 09:25:09 crc kubenswrapper[4687]: E1125 09:25:09.934330 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b34621cb-726c-4a5a-9f6b-62108919b986" containerName="nova-metadata-log" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.934336 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="b34621cb-726c-4a5a-9f6b-62108919b986" containerName="nova-metadata-log" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.934515 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="b34621cb-726c-4a5a-9f6b-62108919b986" containerName="nova-metadata-metadata" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.934532 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="b34621cb-726c-4a5a-9f6b-62108919b986" containerName="nova-metadata-log" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.935392 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.936955 4687 scope.go:117] "RemoveContainer" containerID="f4b57dbc6e34beae984092a6b247cdb806451ca1af9d17407dc021277bda3d89" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.937390 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.937610 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 09:25:09 crc kubenswrapper[4687]: E1125 09:25:09.937752 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4b57dbc6e34beae984092a6b247cdb806451ca1af9d17407dc021277bda3d89\": container with ID starting with f4b57dbc6e34beae984092a6b247cdb806451ca1af9d17407dc021277bda3d89 not found: ID does not exist" containerID="f4b57dbc6e34beae984092a6b247cdb806451ca1af9d17407dc021277bda3d89" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.937804 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4b57dbc6e34beae984092a6b247cdb806451ca1af9d17407dc021277bda3d89"} err="failed to get container status \"f4b57dbc6e34beae984092a6b247cdb806451ca1af9d17407dc021277bda3d89\": rpc error: code = NotFound desc = could not find container \"f4b57dbc6e34beae984092a6b247cdb806451ca1af9d17407dc021277bda3d89\": container with ID starting with f4b57dbc6e34beae984092a6b247cdb806451ca1af9d17407dc021277bda3d89 not found: ID does not exist" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.937838 4687 scope.go:117] "RemoveContainer" containerID="7f034fa1d6eb6fe8e78881bf4cbb1790de71c407b6a0c54d848c957d71342632" Nov 25 09:25:09 crc kubenswrapper[4687]: E1125 09:25:09.942622 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f034fa1d6eb6fe8e78881bf4cbb1790de71c407b6a0c54d848c957d71342632\": container with ID starting with 7f034fa1d6eb6fe8e78881bf4cbb1790de71c407b6a0c54d848c957d71342632 not found: ID does not exist" containerID="7f034fa1d6eb6fe8e78881bf4cbb1790de71c407b6a0c54d848c957d71342632" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.942678 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f034fa1d6eb6fe8e78881bf4cbb1790de71c407b6a0c54d848c957d71342632"} err="failed to get container status \"7f034fa1d6eb6fe8e78881bf4cbb1790de71c407b6a0c54d848c957d71342632\": rpc error: code = NotFound desc = could not find container \"7f034fa1d6eb6fe8e78881bf4cbb1790de71c407b6a0c54d848c957d71342632\": container with ID starting with 7f034fa1d6eb6fe8e78881bf4cbb1790de71c407b6a0c54d848c957d71342632 not found: ID does not exist" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.942712 4687 scope.go:117] "RemoveContainer" containerID="f4b57dbc6e34beae984092a6b247cdb806451ca1af9d17407dc021277bda3d89" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.946313 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4b57dbc6e34beae984092a6b247cdb806451ca1af9d17407dc021277bda3d89"} err="failed to get container status \"f4b57dbc6e34beae984092a6b247cdb806451ca1af9d17407dc021277bda3d89\": rpc error: code = NotFound desc = could not find container \"f4b57dbc6e34beae984092a6b247cdb806451ca1af9d17407dc021277bda3d89\": container with ID starting with 
f4b57dbc6e34beae984092a6b247cdb806451ca1af9d17407dc021277bda3d89 not found: ID does not exist" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.946374 4687 scope.go:117] "RemoveContainer" containerID="7f034fa1d6eb6fe8e78881bf4cbb1790de71c407b6a0c54d848c957d71342632" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.947367 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f034fa1d6eb6fe8e78881bf4cbb1790de71c407b6a0c54d848c957d71342632"} err="failed to get container status \"7f034fa1d6eb6fe8e78881bf4cbb1790de71c407b6a0c54d848c957d71342632\": rpc error: code = NotFound desc = could not find container \"7f034fa1d6eb6fe8e78881bf4cbb1790de71c407b6a0c54d848c957d71342632\": container with ID starting with 7f034fa1d6eb6fe8e78881bf4cbb1790de71c407b6a0c54d848c957d71342632 not found: ID does not exist" Nov 25 09:25:09 crc kubenswrapper[4687]: I1125 09:25:09.968862 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:25:10 crc kubenswrapper[4687]: I1125 09:25:10.126848 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-px4ns\" (UniqueName: \"kubernetes.io/projected/82643de8-e543-4a8e-9ed3-5e676aa60d7a-kube-api-access-px4ns\") pod \"nova-metadata-0\" (UID: \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\") " pod="openstack/nova-metadata-0" Nov 25 09:25:10 crc kubenswrapper[4687]: I1125 09:25:10.126957 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82643de8-e543-4a8e-9ed3-5e676aa60d7a-config-data\") pod \"nova-metadata-0\" (UID: \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\") " pod="openstack/nova-metadata-0" Nov 25 09:25:10 crc kubenswrapper[4687]: I1125 09:25:10.127675 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82643de8-e543-4a8e-9ed3-5e676aa60d7a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\") " pod="openstack/nova-metadata-0" Nov 25 09:25:10 crc kubenswrapper[4687]: I1125 09:25:10.127708 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/82643de8-e543-4a8e-9ed3-5e676aa60d7a-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\") " pod="openstack/nova-metadata-0" Nov 25 09:25:10 crc kubenswrapper[4687]: I1125 09:25:10.127745 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82643de8-e543-4a8e-9ed3-5e676aa60d7a-logs\") pod \"nova-metadata-0\" (UID: \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\") " pod="openstack/nova-metadata-0" Nov 25 09:25:10 crc kubenswrapper[4687]: I1125 09:25:10.228985 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82643de8-e543-4a8e-9ed3-5e676aa60d7a-config-data\") pod \"nova-metadata-0\" (UID: \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\") " pod="openstack/nova-metadata-0" Nov 25 09:25:10 crc kubenswrapper[4687]: I1125 09:25:10.229061 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82643de8-e543-4a8e-9ed3-5e676aa60d7a-combined-ca-bundle\") pod 
\"nova-metadata-0\" (UID: \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\") " pod="openstack/nova-metadata-0" Nov 25 09:25:10 crc kubenswrapper[4687]: I1125 09:25:10.229098 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/82643de8-e543-4a8e-9ed3-5e676aa60d7a-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\") " pod="openstack/nova-metadata-0" Nov 25 09:25:10 crc kubenswrapper[4687]: I1125 09:25:10.229132 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82643de8-e543-4a8e-9ed3-5e676aa60d7a-logs\") pod \"nova-metadata-0\" (UID: \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\") " pod="openstack/nova-metadata-0" Nov 25 09:25:10 crc kubenswrapper[4687]: I1125 09:25:10.229231 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-px4ns\" (UniqueName: \"kubernetes.io/projected/82643de8-e543-4a8e-9ed3-5e676aa60d7a-kube-api-access-px4ns\") pod \"nova-metadata-0\" (UID: \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\") " pod="openstack/nova-metadata-0" Nov 25 09:25:10 crc kubenswrapper[4687]: I1125 09:25:10.230131 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82643de8-e543-4a8e-9ed3-5e676aa60d7a-logs\") pod \"nova-metadata-0\" (UID: \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\") " pod="openstack/nova-metadata-0" Nov 25 09:25:10 crc kubenswrapper[4687]: I1125 09:25:10.233791 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82643de8-e543-4a8e-9ed3-5e676aa60d7a-config-data\") pod \"nova-metadata-0\" (UID: \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\") " pod="openstack/nova-metadata-0" Nov 25 09:25:10 crc kubenswrapper[4687]: I1125 09:25:10.238061 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/82643de8-e543-4a8e-9ed3-5e676aa60d7a-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\") " pod="openstack/nova-metadata-0" Nov 25 09:25:10 crc kubenswrapper[4687]: I1125 09:25:10.238400 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82643de8-e543-4a8e-9ed3-5e676aa60d7a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\") " pod="openstack/nova-metadata-0" Nov 25 09:25:10 crc kubenswrapper[4687]: I1125 09:25:10.248049 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-px4ns\" (UniqueName: \"kubernetes.io/projected/82643de8-e543-4a8e-9ed3-5e676aa60d7a-kube-api-access-px4ns\") pod \"nova-metadata-0\" (UID: \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\") " pod="openstack/nova-metadata-0" Nov 25 09:25:10 crc kubenswrapper[4687]: I1125 09:25:10.274029 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:25:10 crc kubenswrapper[4687]: I1125 09:25:10.758197 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:25:10 crc kubenswrapper[4687]: W1125 09:25:10.766474 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod82643de8_e543_4a8e_9ed3_5e676aa60d7a.slice/crio-2386db0d4b35f7be22a4a2b0aef304089c7105d0231c8335fc2d3c92f457df09 WatchSource:0}: Error finding container 2386db0d4b35f7be22a4a2b0aef304089c7105d0231c8335fc2d3c92f457df09: Status 404 returned error can't find the container with id 2386db0d4b35f7be22a4a2b0aef304089c7105d0231c8335fc2d3c92f457df09 Nov 25 09:25:10 crc kubenswrapper[4687]: I1125 09:25:10.873250 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"82643de8-e543-4a8e-9ed3-5e676aa60d7a","Type":"ContainerStarted","Data":"2386db0d4b35f7be22a4a2b0aef304089c7105d0231c8335fc2d3c92f457df09"} Nov 25 09:25:11 crc kubenswrapper[4687]: I1125 09:25:11.744784 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b34621cb-726c-4a5a-9f6b-62108919b986" path="/var/lib/kubelet/pods/b34621cb-726c-4a5a-9f6b-62108919b986/volumes" Nov 25 09:25:11 crc kubenswrapper[4687]: I1125 09:25:11.882974 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"82643de8-e543-4a8e-9ed3-5e676aa60d7a","Type":"ContainerStarted","Data":"b1555a3e7f78fca169e8cc2b59768d02f2277554f217e95908fb099ef37e17a3"} Nov 25 09:25:11 crc kubenswrapper[4687]: I1125 09:25:11.883033 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"82643de8-e543-4a8e-9ed3-5e676aa60d7a","Type":"ContainerStarted","Data":"0494766befc5f1c86728e34426a2647f76cd5cd9025e57b585b248400e38996b"} Nov 25 09:25:11 crc kubenswrapper[4687]: I1125 09:25:11.884496 4687 generic.go:334] "Generic (PLEG): container finished" podID="7ced595e-30dd-4300-b8b6-df549003f298" containerID="36dcaced308de8a14b8e0fa94c3bc5987d0b6dea9eea2dd787cc9622583dd1d4" exitCode=0 Nov 25 09:25:11 crc kubenswrapper[4687]: I1125 09:25:11.884548 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-fp7dc" event={"ID":"7ced595e-30dd-4300-b8b6-df549003f298","Type":"ContainerDied","Data":"36dcaced308de8a14b8e0fa94c3bc5987d0b6dea9eea2dd787cc9622583dd1d4"} Nov 25 09:25:11 crc kubenswrapper[4687]: I1125 09:25:11.935965 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.935945227 podStartE2EDuration="2.935945227s" podCreationTimestamp="2025-11-25 09:25:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:25:11.899086552 +0000 UTC m=+1306.952726270" watchObservedRunningTime="2025-11-25 09:25:11.935945227 +0000 UTC m=+1306.989584945" Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.080998 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.081266 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.114057 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 25 09:25:13 crc 
kubenswrapper[4687]: I1125 09:25:13.114115 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.141684 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.449563 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-fp7dc" Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.529651 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.585318 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-xg9r6"] Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.585573 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" podUID="a6ddcca5-c362-40ff-94ab-feb6330cc792" containerName="dnsmasq-dns" containerID="cri-o://10cc38fe6b9696c688a29bf40f289e16aa12d51afd4b9613b42156ae1d77acce" gracePeriod=10 Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.636721 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcvnq\" (UniqueName: \"kubernetes.io/projected/7ced595e-30dd-4300-b8b6-df549003f298-kube-api-access-xcvnq\") pod \"7ced595e-30dd-4300-b8b6-df549003f298\" (UID: \"7ced595e-30dd-4300-b8b6-df549003f298\") " Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.636782 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ced595e-30dd-4300-b8b6-df549003f298-combined-ca-bundle\") pod \"7ced595e-30dd-4300-b8b6-df549003f298\" (UID: \"7ced595e-30dd-4300-b8b6-df549003f298\") " Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.636827 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ced595e-30dd-4300-b8b6-df549003f298-scripts\") pod \"7ced595e-30dd-4300-b8b6-df549003f298\" (UID: \"7ced595e-30dd-4300-b8b6-df549003f298\") " Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.636864 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ced595e-30dd-4300-b8b6-df549003f298-config-data\") pod \"7ced595e-30dd-4300-b8b6-df549003f298\" (UID: \"7ced595e-30dd-4300-b8b6-df549003f298\") " Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.643083 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ced595e-30dd-4300-b8b6-df549003f298-scripts" (OuterVolumeSpecName: "scripts") pod "7ced595e-30dd-4300-b8b6-df549003f298" (UID: "7ced595e-30dd-4300-b8b6-df549003f298"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.643692 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ced595e-30dd-4300-b8b6-df549003f298-kube-api-access-xcvnq" (OuterVolumeSpecName: "kube-api-access-xcvnq") pod "7ced595e-30dd-4300-b8b6-df549003f298" (UID: "7ced595e-30dd-4300-b8b6-df549003f298"). InnerVolumeSpecName "kube-api-access-xcvnq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.669975 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ced595e-30dd-4300-b8b6-df549003f298-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7ced595e-30dd-4300-b8b6-df549003f298" (UID: "7ced595e-30dd-4300-b8b6-df549003f298"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.687740 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ced595e-30dd-4300-b8b6-df549003f298-config-data" (OuterVolumeSpecName: "config-data") pod "7ced595e-30dd-4300-b8b6-df549003f298" (UID: "7ced595e-30dd-4300-b8b6-df549003f298"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.738779 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcvnq\" (UniqueName: \"kubernetes.io/projected/7ced595e-30dd-4300-b8b6-df549003f298-kube-api-access-xcvnq\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.738812 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ced595e-30dd-4300-b8b6-df549003f298-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.738822 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ced595e-30dd-4300-b8b6-df549003f298-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.738831 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ced595e-30dd-4300-b8b6-df549003f298-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.907267 4687 generic.go:334] "Generic (PLEG): container finished" podID="a6ddcca5-c362-40ff-94ab-feb6330cc792" containerID="10cc38fe6b9696c688a29bf40f289e16aa12d51afd4b9613b42156ae1d77acce" exitCode=0 Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.907354 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" event={"ID":"a6ddcca5-c362-40ff-94ab-feb6330cc792","Type":"ContainerDied","Data":"10cc38fe6b9696c688a29bf40f289e16aa12d51afd4b9613b42156ae1d77acce"} Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.909371 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-fp7dc" event={"ID":"7ced595e-30dd-4300-b8b6-df549003f298","Type":"ContainerDied","Data":"07630f9b3189a6994cc37af8b03e917c327d30831e0b107c944420a158b4ad21"} Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.909398 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="07630f9b3189a6994cc37af8b03e917c327d30831e0b107c944420a158b4ad21" Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.909626 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-fp7dc" Nov 25 09:25:13 crc kubenswrapper[4687]: I1125 09:25:13.944031 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.049817 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.163700 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="8cb72006-ba6e-4b06-9033-7ffb99c38416" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.181:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.163726 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="8cb72006-ba6e-4b06-9033-7ffb99c38416" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.181:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.230017 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.230246 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="8cb72006-ba6e-4b06-9033-7ffb99c38416" containerName="nova-api-log" containerID="cri-o://0efc4090de6c6bc00dfa67ad88d372225ad91483575a19d88bfa54152f9fed34" gracePeriod=30 Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.230267 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="8cb72006-ba6e-4b06-9033-7ffb99c38416" containerName="nova-api-api" containerID="cri-o://64d8f8d2d305a10c0d6578ff4fe5b91e62bed4882ebfcf1f85a1efb3abd9d760" gracePeriod=30 Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.247331 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nb4rn\" (UniqueName: \"kubernetes.io/projected/a6ddcca5-c362-40ff-94ab-feb6330cc792-kube-api-access-nb4rn\") pod \"a6ddcca5-c362-40ff-94ab-feb6330cc792\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.247424 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-config\") pod \"a6ddcca5-c362-40ff-94ab-feb6330cc792\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.247526 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-dns-svc\") pod \"a6ddcca5-c362-40ff-94ab-feb6330cc792\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.247619 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-ovsdbserver-nb\") pod \"a6ddcca5-c362-40ff-94ab-feb6330cc792\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.247892 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" 
(UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-dns-swift-storage-0\") pod \"a6ddcca5-c362-40ff-94ab-feb6330cc792\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.247945 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-ovsdbserver-sb\") pod \"a6ddcca5-c362-40ff-94ab-feb6330cc792\" (UID: \"a6ddcca5-c362-40ff-94ab-feb6330cc792\") " Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.251168 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6ddcca5-c362-40ff-94ab-feb6330cc792-kube-api-access-nb4rn" (OuterVolumeSpecName: "kube-api-access-nb4rn") pod "a6ddcca5-c362-40ff-94ab-feb6330cc792" (UID: "a6ddcca5-c362-40ff-94ab-feb6330cc792"). InnerVolumeSpecName "kube-api-access-nb4rn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.296703 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.296902 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="82643de8-e543-4a8e-9ed3-5e676aa60d7a" containerName="nova-metadata-log" containerID="cri-o://0494766befc5f1c86728e34426a2647f76cd5cd9025e57b585b248400e38996b" gracePeriod=30 Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.297491 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="82643de8-e543-4a8e-9ed3-5e676aa60d7a" containerName="nova-metadata-metadata" containerID="cri-o://b1555a3e7f78fca169e8cc2b59768d02f2277554f217e95908fb099ef37e17a3" gracePeriod=30 Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.309039 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a6ddcca5-c362-40ff-94ab-feb6330cc792" (UID: "a6ddcca5-c362-40ff-94ab-feb6330cc792"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.324658 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a6ddcca5-c362-40ff-94ab-feb6330cc792" (UID: "a6ddcca5-c362-40ff-94ab-feb6330cc792"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.324671 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a6ddcca5-c362-40ff-94ab-feb6330cc792" (UID: "a6ddcca5-c362-40ff-94ab-feb6330cc792"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.324916 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-config" (OuterVolumeSpecName: "config") pod "a6ddcca5-c362-40ff-94ab-feb6330cc792" (UID: "a6ddcca5-c362-40ff-94ab-feb6330cc792"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.351194 4687 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.351222 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nb4rn\" (UniqueName: \"kubernetes.io/projected/a6ddcca5-c362-40ff-94ab-feb6330cc792-kube-api-access-nb4rn\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.351234 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.351243 4687 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.351251 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.354080 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a6ddcca5-c362-40ff-94ab-feb6330cc792" (UID: "a6ddcca5-c362-40ff-94ab-feb6330cc792"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.395223 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.453251 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6ddcca5-c362-40ff-94ab-feb6330cc792-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.861536 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.937379 4687 generic.go:334] "Generic (PLEG): container finished" podID="8cb72006-ba6e-4b06-9033-7ffb99c38416" containerID="0efc4090de6c6bc00dfa67ad88d372225ad91483575a19d88bfa54152f9fed34" exitCode=143 Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.937469 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8cb72006-ba6e-4b06-9033-7ffb99c38416","Type":"ContainerDied","Data":"0efc4090de6c6bc00dfa67ad88d372225ad91483575a19d88bfa54152f9fed34"} Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.960081 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" event={"ID":"a6ddcca5-c362-40ff-94ab-feb6330cc792","Type":"ContainerDied","Data":"c002f1666eedb9ac434a0dcd7b17032ed304366c3b90d78d32973e34afd5f469"} Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.960134 4687 scope.go:117] "RemoveContainer" containerID="10cc38fe6b9696c688a29bf40f289e16aa12d51afd4b9613b42156ae1d77acce" Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.960136 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-xg9r6" Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.967446 4687 generic.go:334] "Generic (PLEG): container finished" podID="82643de8-e543-4a8e-9ed3-5e676aa60d7a" containerID="b1555a3e7f78fca169e8cc2b59768d02f2277554f217e95908fb099ef37e17a3" exitCode=0 Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.967478 4687 generic.go:334] "Generic (PLEG): container finished" podID="82643de8-e543-4a8e-9ed3-5e676aa60d7a" containerID="0494766befc5f1c86728e34426a2647f76cd5cd9025e57b585b248400e38996b" exitCode=143 Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.968142 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.968558 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"82643de8-e543-4a8e-9ed3-5e676aa60d7a","Type":"ContainerDied","Data":"b1555a3e7f78fca169e8cc2b59768d02f2277554f217e95908fb099ef37e17a3"} Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.968585 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"82643de8-e543-4a8e-9ed3-5e676aa60d7a","Type":"ContainerDied","Data":"0494766befc5f1c86728e34426a2647f76cd5cd9025e57b585b248400e38996b"} Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.968595 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"82643de8-e543-4a8e-9ed3-5e676aa60d7a","Type":"ContainerDied","Data":"2386db0d4b35f7be22a4a2b0aef304089c7105d0231c8335fc2d3c92f457df09"} Nov 25 09:25:14 crc kubenswrapper[4687]: I1125 09:25:14.991961 4687 scope.go:117] "RemoveContainer" containerID="8c238b9ee45a69696cfd408c888c342c1510cb6c0405d77e4584b05c49ca4691" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.008571 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-xg9r6"] Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.014926 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-xg9r6"] Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.027479 4687 scope.go:117] "RemoveContainer" containerID="b1555a3e7f78fca169e8cc2b59768d02f2277554f217e95908fb099ef37e17a3" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.049354 4687 scope.go:117] "RemoveContainer" containerID="0494766befc5f1c86728e34426a2647f76cd5cd9025e57b585b248400e38996b" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.066691 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82643de8-e543-4a8e-9ed3-5e676aa60d7a-config-data\") pod \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\" (UID: \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\") " Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.066765 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82643de8-e543-4a8e-9ed3-5e676aa60d7a-logs\") pod \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\" (UID: \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\") " Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.067002 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-px4ns\" (UniqueName: \"kubernetes.io/projected/82643de8-e543-4a8e-9ed3-5e676aa60d7a-kube-api-access-px4ns\") pod \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\" (UID: \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\") " Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.067067 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82643de8-e543-4a8e-9ed3-5e676aa60d7a-combined-ca-bundle\") pod \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\" (UID: \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\") " Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.067172 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/82643de8-e543-4a8e-9ed3-5e676aa60d7a-nova-metadata-tls-certs\") pod \"82643de8-e543-4a8e-9ed3-5e676aa60d7a\" (UID: 
\"82643de8-e543-4a8e-9ed3-5e676aa60d7a\") " Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.069784 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82643de8-e543-4a8e-9ed3-5e676aa60d7a-logs" (OuterVolumeSpecName: "logs") pod "82643de8-e543-4a8e-9ed3-5e676aa60d7a" (UID: "82643de8-e543-4a8e-9ed3-5e676aa60d7a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.071665 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82643de8-e543-4a8e-9ed3-5e676aa60d7a-kube-api-access-px4ns" (OuterVolumeSpecName: "kube-api-access-px4ns") pod "82643de8-e543-4a8e-9ed3-5e676aa60d7a" (UID: "82643de8-e543-4a8e-9ed3-5e676aa60d7a"). InnerVolumeSpecName "kube-api-access-px4ns". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.078639 4687 scope.go:117] "RemoveContainer" containerID="b1555a3e7f78fca169e8cc2b59768d02f2277554f217e95908fb099ef37e17a3" Nov 25 09:25:15 crc kubenswrapper[4687]: E1125 09:25:15.079168 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1555a3e7f78fca169e8cc2b59768d02f2277554f217e95908fb099ef37e17a3\": container with ID starting with b1555a3e7f78fca169e8cc2b59768d02f2277554f217e95908fb099ef37e17a3 not found: ID does not exist" containerID="b1555a3e7f78fca169e8cc2b59768d02f2277554f217e95908fb099ef37e17a3" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.079214 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1555a3e7f78fca169e8cc2b59768d02f2277554f217e95908fb099ef37e17a3"} err="failed to get container status \"b1555a3e7f78fca169e8cc2b59768d02f2277554f217e95908fb099ef37e17a3\": rpc error: code = NotFound desc = could not find container \"b1555a3e7f78fca169e8cc2b59768d02f2277554f217e95908fb099ef37e17a3\": container with ID starting with b1555a3e7f78fca169e8cc2b59768d02f2277554f217e95908fb099ef37e17a3 not found: ID does not exist" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.079248 4687 scope.go:117] "RemoveContainer" containerID="0494766befc5f1c86728e34426a2647f76cd5cd9025e57b585b248400e38996b" Nov 25 09:25:15 crc kubenswrapper[4687]: E1125 09:25:15.079690 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0494766befc5f1c86728e34426a2647f76cd5cd9025e57b585b248400e38996b\": container with ID starting with 0494766befc5f1c86728e34426a2647f76cd5cd9025e57b585b248400e38996b not found: ID does not exist" containerID="0494766befc5f1c86728e34426a2647f76cd5cd9025e57b585b248400e38996b" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.079716 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0494766befc5f1c86728e34426a2647f76cd5cd9025e57b585b248400e38996b"} err="failed to get container status \"0494766befc5f1c86728e34426a2647f76cd5cd9025e57b585b248400e38996b\": rpc error: code = NotFound desc = could not find container \"0494766befc5f1c86728e34426a2647f76cd5cd9025e57b585b248400e38996b\": container with ID starting with 0494766befc5f1c86728e34426a2647f76cd5cd9025e57b585b248400e38996b not found: ID does not exist" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.079732 4687 scope.go:117] "RemoveContainer" 
containerID="b1555a3e7f78fca169e8cc2b59768d02f2277554f217e95908fb099ef37e17a3" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.080646 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1555a3e7f78fca169e8cc2b59768d02f2277554f217e95908fb099ef37e17a3"} err="failed to get container status \"b1555a3e7f78fca169e8cc2b59768d02f2277554f217e95908fb099ef37e17a3\": rpc error: code = NotFound desc = could not find container \"b1555a3e7f78fca169e8cc2b59768d02f2277554f217e95908fb099ef37e17a3\": container with ID starting with b1555a3e7f78fca169e8cc2b59768d02f2277554f217e95908fb099ef37e17a3 not found: ID does not exist" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.080698 4687 scope.go:117] "RemoveContainer" containerID="0494766befc5f1c86728e34426a2647f76cd5cd9025e57b585b248400e38996b" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.081769 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0494766befc5f1c86728e34426a2647f76cd5cd9025e57b585b248400e38996b"} err="failed to get container status \"0494766befc5f1c86728e34426a2647f76cd5cd9025e57b585b248400e38996b\": rpc error: code = NotFound desc = could not find container \"0494766befc5f1c86728e34426a2647f76cd5cd9025e57b585b248400e38996b\": container with ID starting with 0494766befc5f1c86728e34426a2647f76cd5cd9025e57b585b248400e38996b not found: ID does not exist" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.096695 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82643de8-e543-4a8e-9ed3-5e676aa60d7a-config-data" (OuterVolumeSpecName: "config-data") pod "82643de8-e543-4a8e-9ed3-5e676aa60d7a" (UID: "82643de8-e543-4a8e-9ed3-5e676aa60d7a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.102533 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82643de8-e543-4a8e-9ed3-5e676aa60d7a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "82643de8-e543-4a8e-9ed3-5e676aa60d7a" (UID: "82643de8-e543-4a8e-9ed3-5e676aa60d7a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.137708 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82643de8-e543-4a8e-9ed3-5e676aa60d7a-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "82643de8-e543-4a8e-9ed3-5e676aa60d7a" (UID: "82643de8-e543-4a8e-9ed3-5e676aa60d7a"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.168878 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82643de8-e543-4a8e-9ed3-5e676aa60d7a-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.169895 4687 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82643de8-e543-4a8e-9ed3-5e676aa60d7a-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.169964 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-px4ns\" (UniqueName: \"kubernetes.io/projected/82643de8-e543-4a8e-9ed3-5e676aa60d7a-kube-api-access-px4ns\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.170049 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82643de8-e543-4a8e-9ed3-5e676aa60d7a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.170113 4687 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/82643de8-e543-4a8e-9ed3-5e676aa60d7a-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.304676 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.314613 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.335363 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:25:15 crc kubenswrapper[4687]: E1125 09:25:15.335832 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6ddcca5-c362-40ff-94ab-feb6330cc792" containerName="init" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.335854 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6ddcca5-c362-40ff-94ab-feb6330cc792" containerName="init" Nov 25 09:25:15 crc kubenswrapper[4687]: E1125 09:25:15.335881 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82643de8-e543-4a8e-9ed3-5e676aa60d7a" containerName="nova-metadata-metadata" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.335889 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="82643de8-e543-4a8e-9ed3-5e676aa60d7a" containerName="nova-metadata-metadata" Nov 25 09:25:15 crc kubenswrapper[4687]: E1125 09:25:15.335902 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ced595e-30dd-4300-b8b6-df549003f298" containerName="nova-manage" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.335910 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ced595e-30dd-4300-b8b6-df549003f298" containerName="nova-manage" Nov 25 09:25:15 crc kubenswrapper[4687]: E1125 09:25:15.335919 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82643de8-e543-4a8e-9ed3-5e676aa60d7a" containerName="nova-metadata-log" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.335927 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="82643de8-e543-4a8e-9ed3-5e676aa60d7a" containerName="nova-metadata-log" Nov 25 09:25:15 crc kubenswrapper[4687]: E1125 09:25:15.335961 4687 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="a6ddcca5-c362-40ff-94ab-feb6330cc792" containerName="dnsmasq-dns" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.335970 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6ddcca5-c362-40ff-94ab-feb6330cc792" containerName="dnsmasq-dns" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.336173 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="82643de8-e543-4a8e-9ed3-5e676aa60d7a" containerName="nova-metadata-metadata" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.336196 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ced595e-30dd-4300-b8b6-df549003f298" containerName="nova-manage" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.336213 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="82643de8-e543-4a8e-9ed3-5e676aa60d7a" containerName="nova-metadata-log" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.336227 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6ddcca5-c362-40ff-94ab-feb6330cc792" containerName="dnsmasq-dns" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.337830 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.341300 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.341580 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.347432 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.372379 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/663335a7-77d3-4114-b2c4-7deb73b7e11d-config-data\") pod \"nova-metadata-0\" (UID: \"663335a7-77d3-4114-b2c4-7deb73b7e11d\") " pod="openstack/nova-metadata-0" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.372636 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/663335a7-77d3-4114-b2c4-7deb73b7e11d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"663335a7-77d3-4114-b2c4-7deb73b7e11d\") " pod="openstack/nova-metadata-0" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.372778 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/663335a7-77d3-4114-b2c4-7deb73b7e11d-logs\") pod \"nova-metadata-0\" (UID: \"663335a7-77d3-4114-b2c4-7deb73b7e11d\") " pod="openstack/nova-metadata-0" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.372889 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vktm9\" (UniqueName: \"kubernetes.io/projected/663335a7-77d3-4114-b2c4-7deb73b7e11d-kube-api-access-vktm9\") pod \"nova-metadata-0\" (UID: \"663335a7-77d3-4114-b2c4-7deb73b7e11d\") " pod="openstack/nova-metadata-0" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.373059 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/663335a7-77d3-4114-b2c4-7deb73b7e11d-nova-metadata-tls-certs\") 
pod \"nova-metadata-0\" (UID: \"663335a7-77d3-4114-b2c4-7deb73b7e11d\") " pod="openstack/nova-metadata-0" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.474614 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/663335a7-77d3-4114-b2c4-7deb73b7e11d-logs\") pod \"nova-metadata-0\" (UID: \"663335a7-77d3-4114-b2c4-7deb73b7e11d\") " pod="openstack/nova-metadata-0" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.474895 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vktm9\" (UniqueName: \"kubernetes.io/projected/663335a7-77d3-4114-b2c4-7deb73b7e11d-kube-api-access-vktm9\") pod \"nova-metadata-0\" (UID: \"663335a7-77d3-4114-b2c4-7deb73b7e11d\") " pod="openstack/nova-metadata-0" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.475048 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/663335a7-77d3-4114-b2c4-7deb73b7e11d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"663335a7-77d3-4114-b2c4-7deb73b7e11d\") " pod="openstack/nova-metadata-0" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.475265 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/663335a7-77d3-4114-b2c4-7deb73b7e11d-config-data\") pod \"nova-metadata-0\" (UID: \"663335a7-77d3-4114-b2c4-7deb73b7e11d\") " pod="openstack/nova-metadata-0" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.475356 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/663335a7-77d3-4114-b2c4-7deb73b7e11d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"663335a7-77d3-4114-b2c4-7deb73b7e11d\") " pod="openstack/nova-metadata-0" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.483370 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/663335a7-77d3-4114-b2c4-7deb73b7e11d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"663335a7-77d3-4114-b2c4-7deb73b7e11d\") " pod="openstack/nova-metadata-0" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.483930 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/663335a7-77d3-4114-b2c4-7deb73b7e11d-logs\") pod \"nova-metadata-0\" (UID: \"663335a7-77d3-4114-b2c4-7deb73b7e11d\") " pod="openstack/nova-metadata-0" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.485002 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/663335a7-77d3-4114-b2c4-7deb73b7e11d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"663335a7-77d3-4114-b2c4-7deb73b7e11d\") " pod="openstack/nova-metadata-0" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.497263 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/663335a7-77d3-4114-b2c4-7deb73b7e11d-config-data\") pod \"nova-metadata-0\" (UID: \"663335a7-77d3-4114-b2c4-7deb73b7e11d\") " pod="openstack/nova-metadata-0" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.519570 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vktm9\" (UniqueName: 
\"kubernetes.io/projected/663335a7-77d3-4114-b2c4-7deb73b7e11d-kube-api-access-vktm9\") pod \"nova-metadata-0\" (UID: \"663335a7-77d3-4114-b2c4-7deb73b7e11d\") " pod="openstack/nova-metadata-0" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.733175 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.753806 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82643de8-e543-4a8e-9ed3-5e676aa60d7a" path="/var/lib/kubelet/pods/82643de8-e543-4a8e-9ed3-5e676aa60d7a/volumes" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.754675 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6ddcca5-c362-40ff-94ab-feb6330cc792" path="/var/lib/kubelet/pods/a6ddcca5-c362-40ff-94ab-feb6330cc792/volumes" Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.993874 4687 generic.go:334] "Generic (PLEG): container finished" podID="08ecdbfb-3483-4dc7-83a1-a9dd7b03126a" containerID="9072d01244da2515ab0e242e0a9776bf583d4df342452cd2c72d2a5c77ed4fff" exitCode=0 Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.993953 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-6nqsr" event={"ID":"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a","Type":"ContainerDied","Data":"9072d01244da2515ab0e242e0a9776bf583d4df342452cd2c72d2a5c77ed4fff"} Nov 25 09:25:15 crc kubenswrapper[4687]: I1125 09:25:15.994304 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="083a6ba3-a4ab-4f50-948d-bb7e02c0f886" containerName="nova-scheduler-scheduler" containerID="cri-o://90e8b420983469556557a064231e1c06a2c7f15de027fdf75aa01a7de34e9fae" gracePeriod=30 Nov 25 09:25:16 crc kubenswrapper[4687]: I1125 09:25:16.178570 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:25:16 crc kubenswrapper[4687]: W1125 09:25:16.183316 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod663335a7_77d3_4114_b2c4_7deb73b7e11d.slice/crio-320cde6345c972701d78c75031a5b3ca96edefb530aa826b8f8950f1be43aa25 WatchSource:0}: Error finding container 320cde6345c972701d78c75031a5b3ca96edefb530aa826b8f8950f1be43aa25: Status 404 returned error can't find the container with id 320cde6345c972701d78c75031a5b3ca96edefb530aa826b8f8950f1be43aa25 Nov 25 09:25:17 crc kubenswrapper[4687]: I1125 09:25:17.006378 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"663335a7-77d3-4114-b2c4-7deb73b7e11d","Type":"ContainerStarted","Data":"62444ed88a14d68e2d69dc116fddc906b095f8d99bbbe82c863affa9c5a51108"} Nov 25 09:25:17 crc kubenswrapper[4687]: I1125 09:25:17.006786 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"663335a7-77d3-4114-b2c4-7deb73b7e11d","Type":"ContainerStarted","Data":"1331baeb55abad14196e470d7b417e6be7d817595861f55e802c6414ea65d7b9"} Nov 25 09:25:17 crc kubenswrapper[4687]: I1125 09:25:17.006803 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"663335a7-77d3-4114-b2c4-7deb73b7e11d","Type":"ContainerStarted","Data":"320cde6345c972701d78c75031a5b3ca96edefb530aa826b8f8950f1be43aa25"} Nov 25 09:25:17 crc kubenswrapper[4687]: I1125 09:25:17.030895 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/nova-metadata-0" podStartSLOduration=2.030871705 podStartE2EDuration="2.030871705s" podCreationTimestamp="2025-11-25 09:25:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:25:17.028703926 +0000 UTC m=+1312.082343644" watchObservedRunningTime="2025-11-25 09:25:17.030871705 +0000 UTC m=+1312.084511423" Nov 25 09:25:17 crc kubenswrapper[4687]: I1125 09:25:17.364630 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-6nqsr" Nov 25 09:25:17 crc kubenswrapper[4687]: I1125 09:25:17.422919 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-combined-ca-bundle\") pod \"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a\" (UID: \"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a\") " Nov 25 09:25:17 crc kubenswrapper[4687]: I1125 09:25:17.422976 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-scripts\") pod \"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a\" (UID: \"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a\") " Nov 25 09:25:17 crc kubenswrapper[4687]: I1125 09:25:17.423085 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-298g8\" (UniqueName: \"kubernetes.io/projected/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-kube-api-access-298g8\") pod \"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a\" (UID: \"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a\") " Nov 25 09:25:17 crc kubenswrapper[4687]: I1125 09:25:17.423238 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-config-data\") pod \"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a\" (UID: \"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a\") " Nov 25 09:25:17 crc kubenswrapper[4687]: I1125 09:25:17.427950 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-scripts" (OuterVolumeSpecName: "scripts") pod "08ecdbfb-3483-4dc7-83a1-a9dd7b03126a" (UID: "08ecdbfb-3483-4dc7-83a1-a9dd7b03126a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:17 crc kubenswrapper[4687]: I1125 09:25:17.440879 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-kube-api-access-298g8" (OuterVolumeSpecName: "kube-api-access-298g8") pod "08ecdbfb-3483-4dc7-83a1-a9dd7b03126a" (UID: "08ecdbfb-3483-4dc7-83a1-a9dd7b03126a"). InnerVolumeSpecName "kube-api-access-298g8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:25:17 crc kubenswrapper[4687]: I1125 09:25:17.450539 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "08ecdbfb-3483-4dc7-83a1-a9dd7b03126a" (UID: "08ecdbfb-3483-4dc7-83a1-a9dd7b03126a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:17 crc kubenswrapper[4687]: I1125 09:25:17.456190 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-config-data" (OuterVolumeSpecName: "config-data") pod "08ecdbfb-3483-4dc7-83a1-a9dd7b03126a" (UID: "08ecdbfb-3483-4dc7-83a1-a9dd7b03126a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:17 crc kubenswrapper[4687]: I1125 09:25:17.525572 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:17 crc kubenswrapper[4687]: I1125 09:25:17.525867 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:17 crc kubenswrapper[4687]: I1125 09:25:17.525877 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:17 crc kubenswrapper[4687]: I1125 09:25:17.525886 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-298g8\" (UniqueName: \"kubernetes.io/projected/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a-kube-api-access-298g8\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:18 crc kubenswrapper[4687]: I1125 09:25:18.017225 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-6nqsr" event={"ID":"08ecdbfb-3483-4dc7-83a1-a9dd7b03126a","Type":"ContainerDied","Data":"6746e1344e6a935acc1e7b7ef7c4d5466b9271708ebee67bcb1f9cda3f89d215"} Nov 25 09:25:18 crc kubenswrapper[4687]: I1125 09:25:18.017305 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-6nqsr" Nov 25 09:25:18 crc kubenswrapper[4687]: I1125 09:25:18.017318 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6746e1344e6a935acc1e7b7ef7c4d5466b9271708ebee67bcb1f9cda3f89d215" Nov 25 09:25:18 crc kubenswrapper[4687]: I1125 09:25:18.086158 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 09:25:18 crc kubenswrapper[4687]: E1125 09:25:18.086957 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08ecdbfb-3483-4dc7-83a1-a9dd7b03126a" containerName="nova-cell1-conductor-db-sync" Nov 25 09:25:18 crc kubenswrapper[4687]: I1125 09:25:18.087076 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="08ecdbfb-3483-4dc7-83a1-a9dd7b03126a" containerName="nova-cell1-conductor-db-sync" Nov 25 09:25:18 crc kubenswrapper[4687]: I1125 09:25:18.087357 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="08ecdbfb-3483-4dc7-83a1-a9dd7b03126a" containerName="nova-cell1-conductor-db-sync" Nov 25 09:25:18 crc kubenswrapper[4687]: I1125 09:25:18.088149 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 25 09:25:18 crc kubenswrapper[4687]: I1125 09:25:18.090497 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 25 09:25:18 crc kubenswrapper[4687]: I1125 09:25:18.110749 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 09:25:18 crc kubenswrapper[4687]: E1125 09:25:18.116383 4687 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="90e8b420983469556557a064231e1c06a2c7f15de027fdf75aa01a7de34e9fae" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 09:25:18 crc kubenswrapper[4687]: E1125 09:25:18.118241 4687 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="90e8b420983469556557a064231e1c06a2c7f15de027fdf75aa01a7de34e9fae" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 09:25:18 crc kubenswrapper[4687]: E1125 09:25:18.120788 4687 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="90e8b420983469556557a064231e1c06a2c7f15de027fdf75aa01a7de34e9fae" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 09:25:18 crc kubenswrapper[4687]: E1125 09:25:18.120834 4687 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="083a6ba3-a4ab-4f50-948d-bb7e02c0f886" containerName="nova-scheduler-scheduler" Nov 25 09:25:18 crc kubenswrapper[4687]: I1125 09:25:18.261484 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qk6w7\" (UniqueName: \"kubernetes.io/projected/4f859978-9653-48fd-9c45-a2eb11561c0d-kube-api-access-qk6w7\") pod \"nova-cell1-conductor-0\" (UID: \"4f859978-9653-48fd-9c45-a2eb11561c0d\") " pod="openstack/nova-cell1-conductor-0" Nov 25 09:25:18 crc kubenswrapper[4687]: I1125 09:25:18.261595 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f859978-9653-48fd-9c45-a2eb11561c0d-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"4f859978-9653-48fd-9c45-a2eb11561c0d\") " pod="openstack/nova-cell1-conductor-0" Nov 25 09:25:18 crc kubenswrapper[4687]: I1125 09:25:18.261644 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f859978-9653-48fd-9c45-a2eb11561c0d-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"4f859978-9653-48fd-9c45-a2eb11561c0d\") " pod="openstack/nova-cell1-conductor-0" Nov 25 09:25:18 crc kubenswrapper[4687]: I1125 09:25:18.363292 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qk6w7\" (UniqueName: \"kubernetes.io/projected/4f859978-9653-48fd-9c45-a2eb11561c0d-kube-api-access-qk6w7\") pod \"nova-cell1-conductor-0\" (UID: \"4f859978-9653-48fd-9c45-a2eb11561c0d\") " pod="openstack/nova-cell1-conductor-0" Nov 25 
09:25:18 crc kubenswrapper[4687]: I1125 09:25:18.363394 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f859978-9653-48fd-9c45-a2eb11561c0d-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"4f859978-9653-48fd-9c45-a2eb11561c0d\") " pod="openstack/nova-cell1-conductor-0" Nov 25 09:25:18 crc kubenswrapper[4687]: I1125 09:25:18.363455 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f859978-9653-48fd-9c45-a2eb11561c0d-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"4f859978-9653-48fd-9c45-a2eb11561c0d\") " pod="openstack/nova-cell1-conductor-0" Nov 25 09:25:18 crc kubenswrapper[4687]: I1125 09:25:18.367025 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f859978-9653-48fd-9c45-a2eb11561c0d-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"4f859978-9653-48fd-9c45-a2eb11561c0d\") " pod="openstack/nova-cell1-conductor-0" Nov 25 09:25:18 crc kubenswrapper[4687]: I1125 09:25:18.367988 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f859978-9653-48fd-9c45-a2eb11561c0d-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"4f859978-9653-48fd-9c45-a2eb11561c0d\") " pod="openstack/nova-cell1-conductor-0" Nov 25 09:25:18 crc kubenswrapper[4687]: I1125 09:25:18.385835 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qk6w7\" (UniqueName: \"kubernetes.io/projected/4f859978-9653-48fd-9c45-a2eb11561c0d-kube-api-access-qk6w7\") pod \"nova-cell1-conductor-0\" (UID: \"4f859978-9653-48fd-9c45-a2eb11561c0d\") " pod="openstack/nova-cell1-conductor-0" Nov 25 09:25:18 crc kubenswrapper[4687]: I1125 09:25:18.408245 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 25 09:25:18 crc kubenswrapper[4687]: I1125 09:25:18.899425 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 09:25:19 crc kubenswrapper[4687]: I1125 09:25:19.029052 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"4f859978-9653-48fd-9c45-a2eb11561c0d","Type":"ContainerStarted","Data":"040b55c8e6d8d1b94b3fef8c4c0f68e883d6fdad0e7418992ccee133ef04ba3c"} Nov 25 09:25:19 crc kubenswrapper[4687]: I1125 09:25:19.796592 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 09:25:19 crc kubenswrapper[4687]: I1125 09:25:19.891400 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/083a6ba3-a4ab-4f50-948d-bb7e02c0f886-combined-ca-bundle\") pod \"083a6ba3-a4ab-4f50-948d-bb7e02c0f886\" (UID: \"083a6ba3-a4ab-4f50-948d-bb7e02c0f886\") " Nov 25 09:25:19 crc kubenswrapper[4687]: I1125 09:25:19.891577 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sshwx\" (UniqueName: \"kubernetes.io/projected/083a6ba3-a4ab-4f50-948d-bb7e02c0f886-kube-api-access-sshwx\") pod \"083a6ba3-a4ab-4f50-948d-bb7e02c0f886\" (UID: \"083a6ba3-a4ab-4f50-948d-bb7e02c0f886\") " Nov 25 09:25:19 crc kubenswrapper[4687]: I1125 09:25:19.891642 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/083a6ba3-a4ab-4f50-948d-bb7e02c0f886-config-data\") pod \"083a6ba3-a4ab-4f50-948d-bb7e02c0f886\" (UID: \"083a6ba3-a4ab-4f50-948d-bb7e02c0f886\") " Nov 25 09:25:19 crc kubenswrapper[4687]: I1125 09:25:19.913865 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/083a6ba3-a4ab-4f50-948d-bb7e02c0f886-kube-api-access-sshwx" (OuterVolumeSpecName: "kube-api-access-sshwx") pod "083a6ba3-a4ab-4f50-948d-bb7e02c0f886" (UID: "083a6ba3-a4ab-4f50-948d-bb7e02c0f886"). InnerVolumeSpecName "kube-api-access-sshwx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:25:19 crc kubenswrapper[4687]: E1125 09:25:19.919226 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/083a6ba3-a4ab-4f50-948d-bb7e02c0f886-combined-ca-bundle podName:083a6ba3-a4ab-4f50-948d-bb7e02c0f886 nodeName:}" failed. No retries permitted until 2025-11-25 09:25:20.419191808 +0000 UTC m=+1315.472831526 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "combined-ca-bundle" (UniqueName: "kubernetes.io/secret/083a6ba3-a4ab-4f50-948d-bb7e02c0f886-combined-ca-bundle") pod "083a6ba3-a4ab-4f50-948d-bb7e02c0f886" (UID: "083a6ba3-a4ab-4f50-948d-bb7e02c0f886") : error deleting /var/lib/kubelet/pods/083a6ba3-a4ab-4f50-948d-bb7e02c0f886/volume-subpaths: remove /var/lib/kubelet/pods/083a6ba3-a4ab-4f50-948d-bb7e02c0f886/volume-subpaths: no such file or directory Nov 25 09:25:19 crc kubenswrapper[4687]: I1125 09:25:19.922774 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/083a6ba3-a4ab-4f50-948d-bb7e02c0f886-config-data" (OuterVolumeSpecName: "config-data") pod "083a6ba3-a4ab-4f50-948d-bb7e02c0f886" (UID: "083a6ba3-a4ab-4f50-948d-bb7e02c0f886"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:19 crc kubenswrapper[4687]: I1125 09:25:19.993290 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sshwx\" (UniqueName: \"kubernetes.io/projected/083a6ba3-a4ab-4f50-948d-bb7e02c0f886-kube-api-access-sshwx\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:19 crc kubenswrapper[4687]: I1125 09:25:19.993334 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/083a6ba3-a4ab-4f50-948d-bb7e02c0f886-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.049866 4687 generic.go:334] "Generic (PLEG): container finished" podID="083a6ba3-a4ab-4f50-948d-bb7e02c0f886" containerID="90e8b420983469556557a064231e1c06a2c7f15de027fdf75aa01a7de34e9fae" exitCode=0 Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.050029 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.050127 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"083a6ba3-a4ab-4f50-948d-bb7e02c0f886","Type":"ContainerDied","Data":"90e8b420983469556557a064231e1c06a2c7f15de027fdf75aa01a7de34e9fae"} Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.050188 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"083a6ba3-a4ab-4f50-948d-bb7e02c0f886","Type":"ContainerDied","Data":"25a2991cc395a97295e9db6fcf9464b699f676cf0037a1e1ea91b1b8a014e692"} Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.050213 4687 scope.go:117] "RemoveContainer" containerID="90e8b420983469556557a064231e1c06a2c7f15de027fdf75aa01a7de34e9fae" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.055864 4687 generic.go:334] "Generic (PLEG): container finished" podID="8cb72006-ba6e-4b06-9033-7ffb99c38416" containerID="64d8f8d2d305a10c0d6578ff4fe5b91e62bed4882ebfcf1f85a1efb3abd9d760" exitCode=0 Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.055965 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8cb72006-ba6e-4b06-9033-7ffb99c38416","Type":"ContainerDied","Data":"64d8f8d2d305a10c0d6578ff4fe5b91e62bed4882ebfcf1f85a1efb3abd9d760"} Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.059284 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"4f859978-9653-48fd-9c45-a2eb11561c0d","Type":"ContainerStarted","Data":"bbf667f9df6564c94de1bfa045cfd2fc638bc954211c502e1da6d51dd1d814f7"} Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.060445 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.072796 4687 scope.go:117] "RemoveContainer" containerID="90e8b420983469556557a064231e1c06a2c7f15de027fdf75aa01a7de34e9fae" Nov 25 09:25:20 crc kubenswrapper[4687]: E1125 09:25:20.073128 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90e8b420983469556557a064231e1c06a2c7f15de027fdf75aa01a7de34e9fae\": container with ID starting with 90e8b420983469556557a064231e1c06a2c7f15de027fdf75aa01a7de34e9fae not found: ID does not exist" containerID="90e8b420983469556557a064231e1c06a2c7f15de027fdf75aa01a7de34e9fae" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.073155 4687 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90e8b420983469556557a064231e1c06a2c7f15de027fdf75aa01a7de34e9fae"} err="failed to get container status \"90e8b420983469556557a064231e1c06a2c7f15de027fdf75aa01a7de34e9fae\": rpc error: code = NotFound desc = could not find container \"90e8b420983469556557a064231e1c06a2c7f15de027fdf75aa01a7de34e9fae\": container with ID starting with 90e8b420983469556557a064231e1c06a2c7f15de027fdf75aa01a7de34e9fae not found: ID does not exist" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.086443 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.086426 podStartE2EDuration="2.086426s" podCreationTimestamp="2025-11-25 09:25:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:25:20.073788214 +0000 UTC m=+1315.127427932" watchObservedRunningTime="2025-11-25 09:25:20.086426 +0000 UTC m=+1315.140065718" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.264098 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.398584 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8cb72006-ba6e-4b06-9033-7ffb99c38416-config-data\") pod \"8cb72006-ba6e-4b06-9033-7ffb99c38416\" (UID: \"8cb72006-ba6e-4b06-9033-7ffb99c38416\") " Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.398749 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jg84d\" (UniqueName: \"kubernetes.io/projected/8cb72006-ba6e-4b06-9033-7ffb99c38416-kube-api-access-jg84d\") pod \"8cb72006-ba6e-4b06-9033-7ffb99c38416\" (UID: \"8cb72006-ba6e-4b06-9033-7ffb99c38416\") " Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.398795 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8cb72006-ba6e-4b06-9033-7ffb99c38416-combined-ca-bundle\") pod \"8cb72006-ba6e-4b06-9033-7ffb99c38416\" (UID: \"8cb72006-ba6e-4b06-9033-7ffb99c38416\") " Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.398944 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8cb72006-ba6e-4b06-9033-7ffb99c38416-logs\") pod \"8cb72006-ba6e-4b06-9033-7ffb99c38416\" (UID: \"8cb72006-ba6e-4b06-9033-7ffb99c38416\") " Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.399716 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8cb72006-ba6e-4b06-9033-7ffb99c38416-logs" (OuterVolumeSpecName: "logs") pod "8cb72006-ba6e-4b06-9033-7ffb99c38416" (UID: "8cb72006-ba6e-4b06-9033-7ffb99c38416"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.402865 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cb72006-ba6e-4b06-9033-7ffb99c38416-kube-api-access-jg84d" (OuterVolumeSpecName: "kube-api-access-jg84d") pod "8cb72006-ba6e-4b06-9033-7ffb99c38416" (UID: "8cb72006-ba6e-4b06-9033-7ffb99c38416"). InnerVolumeSpecName "kube-api-access-jg84d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.429822 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cb72006-ba6e-4b06-9033-7ffb99c38416-config-data" (OuterVolumeSpecName: "config-data") pod "8cb72006-ba6e-4b06-9033-7ffb99c38416" (UID: "8cb72006-ba6e-4b06-9033-7ffb99c38416"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.437639 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cb72006-ba6e-4b06-9033-7ffb99c38416-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8cb72006-ba6e-4b06-9033-7ffb99c38416" (UID: "8cb72006-ba6e-4b06-9033-7ffb99c38416"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.500284 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/083a6ba3-a4ab-4f50-948d-bb7e02c0f886-combined-ca-bundle\") pod \"083a6ba3-a4ab-4f50-948d-bb7e02c0f886\" (UID: \"083a6ba3-a4ab-4f50-948d-bb7e02c0f886\") " Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.501019 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8cb72006-ba6e-4b06-9033-7ffb99c38416-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.501068 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jg84d\" (UniqueName: \"kubernetes.io/projected/8cb72006-ba6e-4b06-9033-7ffb99c38416-kube-api-access-jg84d\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.501084 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8cb72006-ba6e-4b06-9033-7ffb99c38416-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.501096 4687 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8cb72006-ba6e-4b06-9033-7ffb99c38416-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.504747 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/083a6ba3-a4ab-4f50-948d-bb7e02c0f886-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "083a6ba3-a4ab-4f50-948d-bb7e02c0f886" (UID: "083a6ba3-a4ab-4f50-948d-bb7e02c0f886"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.602413 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/083a6ba3-a4ab-4f50-948d-bb7e02c0f886-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.686636 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.704554 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.721987 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:25:20 crc kubenswrapper[4687]: E1125 09:25:20.722785 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cb72006-ba6e-4b06-9033-7ffb99c38416" containerName="nova-api-api" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.722892 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cb72006-ba6e-4b06-9033-7ffb99c38416" containerName="nova-api-api" Nov 25 09:25:20 crc kubenswrapper[4687]: E1125 09:25:20.722953 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="083a6ba3-a4ab-4f50-948d-bb7e02c0f886" containerName="nova-scheduler-scheduler" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.723009 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="083a6ba3-a4ab-4f50-948d-bb7e02c0f886" containerName="nova-scheduler-scheduler" Nov 25 09:25:20 crc kubenswrapper[4687]: E1125 09:25:20.723063 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cb72006-ba6e-4b06-9033-7ffb99c38416" containerName="nova-api-log" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.723112 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cb72006-ba6e-4b06-9033-7ffb99c38416" containerName="nova-api-log" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.723341 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="083a6ba3-a4ab-4f50-948d-bb7e02c0f886" containerName="nova-scheduler-scheduler" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.723412 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cb72006-ba6e-4b06-9033-7ffb99c38416" containerName="nova-api-log" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.723469 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cb72006-ba6e-4b06-9033-7ffb99c38416" containerName="nova-api-api" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.724102 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.726957 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.733629 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.733773 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.733791 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.910433 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3-config-data\") pod \"nova-scheduler-0\" (UID: \"c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3\") " pod="openstack/nova-scheduler-0" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.910539 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3\") " pod="openstack/nova-scheduler-0" Nov 25 09:25:20 crc kubenswrapper[4687]: I1125 09:25:20.910608 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bw6x\" (UniqueName: \"kubernetes.io/projected/c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3-kube-api-access-8bw6x\") pod \"nova-scheduler-0\" (UID: \"c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3\") " pod="openstack/nova-scheduler-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.012543 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3-config-data\") pod \"nova-scheduler-0\" (UID: \"c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3\") " pod="openstack/nova-scheduler-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.012891 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3\") " pod="openstack/nova-scheduler-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.013157 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bw6x\" (UniqueName: \"kubernetes.io/projected/c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3-kube-api-access-8bw6x\") pod \"nova-scheduler-0\" (UID: \"c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3\") " pod="openstack/nova-scheduler-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.016090 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3-config-data\") pod \"nova-scheduler-0\" (UID: \"c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3\") " pod="openstack/nova-scheduler-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.020153 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3\") " pod="openstack/nova-scheduler-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.032641 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bw6x\" (UniqueName: \"kubernetes.io/projected/c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3-kube-api-access-8bw6x\") pod \"nova-scheduler-0\" (UID: \"c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3\") " pod="openstack/nova-scheduler-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.053009 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.072961 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8cb72006-ba6e-4b06-9033-7ffb99c38416","Type":"ContainerDied","Data":"e2f5c1cdc673e4cd46acb06c47febcfca0192dfefbc2e8c022d23a8c67f3e691"} Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.072995 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.073003 4687 scope.go:117] "RemoveContainer" containerID="64d8f8d2d305a10c0d6578ff4fe5b91e62bed4882ebfcf1f85a1efb3abd9d760" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.122289 4687 scope.go:117] "RemoveContainer" containerID="0efc4090de6c6bc00dfa67ad88d372225ad91483575a19d88bfa54152f9fed34" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.126136 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.144552 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.154785 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.156412 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.159155 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.166020 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.317749 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e41b4fe4-f7e3-4321-8d63-555a08900ab1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e41b4fe4-f7e3-4321-8d63-555a08900ab1\") " pod="openstack/nova-api-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.318521 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e41b4fe4-f7e3-4321-8d63-555a08900ab1-config-data\") pod \"nova-api-0\" (UID: \"e41b4fe4-f7e3-4321-8d63-555a08900ab1\") " pod="openstack/nova-api-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.318598 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fc5nk\" (UniqueName: \"kubernetes.io/projected/e41b4fe4-f7e3-4321-8d63-555a08900ab1-kube-api-access-fc5nk\") pod \"nova-api-0\" (UID: \"e41b4fe4-f7e3-4321-8d63-555a08900ab1\") " pod="openstack/nova-api-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.318636 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e41b4fe4-f7e3-4321-8d63-555a08900ab1-logs\") pod \"nova-api-0\" (UID: \"e41b4fe4-f7e3-4321-8d63-555a08900ab1\") " pod="openstack/nova-api-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.420864 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fc5nk\" (UniqueName: \"kubernetes.io/projected/e41b4fe4-f7e3-4321-8d63-555a08900ab1-kube-api-access-fc5nk\") pod \"nova-api-0\" (UID: \"e41b4fe4-f7e3-4321-8d63-555a08900ab1\") " pod="openstack/nova-api-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.421001 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e41b4fe4-f7e3-4321-8d63-555a08900ab1-logs\") pod \"nova-api-0\" (UID: \"e41b4fe4-f7e3-4321-8d63-555a08900ab1\") " pod="openstack/nova-api-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.421150 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e41b4fe4-f7e3-4321-8d63-555a08900ab1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e41b4fe4-f7e3-4321-8d63-555a08900ab1\") " pod="openstack/nova-api-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.421205 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e41b4fe4-f7e3-4321-8d63-555a08900ab1-config-data\") pod \"nova-api-0\" (UID: \"e41b4fe4-f7e3-4321-8d63-555a08900ab1\") " pod="openstack/nova-api-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.421481 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e41b4fe4-f7e3-4321-8d63-555a08900ab1-logs\") pod \"nova-api-0\" (UID: \"e41b4fe4-f7e3-4321-8d63-555a08900ab1\") " 
pod="openstack/nova-api-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.427533 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e41b4fe4-f7e3-4321-8d63-555a08900ab1-config-data\") pod \"nova-api-0\" (UID: \"e41b4fe4-f7e3-4321-8d63-555a08900ab1\") " pod="openstack/nova-api-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.433161 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e41b4fe4-f7e3-4321-8d63-555a08900ab1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e41b4fe4-f7e3-4321-8d63-555a08900ab1\") " pod="openstack/nova-api-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.443127 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fc5nk\" (UniqueName: \"kubernetes.io/projected/e41b4fe4-f7e3-4321-8d63-555a08900ab1-kube-api-access-fc5nk\") pod \"nova-api-0\" (UID: \"e41b4fe4-f7e3-4321-8d63-555a08900ab1\") " pod="openstack/nova-api-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.480929 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.544753 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.757232 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="083a6ba3-a4ab-4f50-948d-bb7e02c0f886" path="/var/lib/kubelet/pods/083a6ba3-a4ab-4f50-948d-bb7e02c0f886/volumes" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.758459 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cb72006-ba6e-4b06-9033-7ffb99c38416" path="/var/lib/kubelet/pods/8cb72006-ba6e-4b06-9033-7ffb99c38416/volumes" Nov 25 09:25:21 crc kubenswrapper[4687]: I1125 09:25:21.969403 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:25:21 crc kubenswrapper[4687]: W1125 09:25:21.976975 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode41b4fe4_f7e3_4321_8d63_555a08900ab1.slice/crio-5f2fd351fab3d146c7f7178056b3bc33b8726c5bfa9666ad3b00710f26f36c7e WatchSource:0}: Error finding container 5f2fd351fab3d146c7f7178056b3bc33b8726c5bfa9666ad3b00710f26f36c7e: Status 404 returned error can't find the container with id 5f2fd351fab3d146c7f7178056b3bc33b8726c5bfa9666ad3b00710f26f36c7e Nov 25 09:25:22 crc kubenswrapper[4687]: I1125 09:25:22.102491 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3","Type":"ContainerStarted","Data":"fbe135abd5cf9331b1a53c3caa340ac3e532d54cb11a8cc877bb2bf8d00fd271"} Nov 25 09:25:22 crc kubenswrapper[4687]: I1125 09:25:22.102773 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3","Type":"ContainerStarted","Data":"ace1d20e70f3897aee39ca79a8efbcedb961a1bb4886990fa7a27ee8dce4a925"} Nov 25 09:25:22 crc kubenswrapper[4687]: I1125 09:25:22.103870 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e41b4fe4-f7e3-4321-8d63-555a08900ab1","Type":"ContainerStarted","Data":"5f2fd351fab3d146c7f7178056b3bc33b8726c5bfa9666ad3b00710f26f36c7e"} Nov 25 09:25:22 crc kubenswrapper[4687]: I1125 09:25:22.132660 4687 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.132640299 podStartE2EDuration="2.132640299s" podCreationTimestamp="2025-11-25 09:25:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:25:22.129843813 +0000 UTC m=+1317.183483551" watchObservedRunningTime="2025-11-25 09:25:22.132640299 +0000 UTC m=+1317.186280017" Nov 25 09:25:22 crc kubenswrapper[4687]: I1125 09:25:22.922022 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 09:25:23 crc kubenswrapper[4687]: I1125 09:25:23.113859 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e41b4fe4-f7e3-4321-8d63-555a08900ab1","Type":"ContainerStarted","Data":"5ef8aba58036a427fc841ff969bc96ff97424e4363b721b54f5f5a0c99cedd91"} Nov 25 09:25:24 crc kubenswrapper[4687]: I1125 09:25:24.125769 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e41b4fe4-f7e3-4321-8d63-555a08900ab1","Type":"ContainerStarted","Data":"899292de27fa70bbe6cb91e6f4d80a85114ed708fedbc5c34a0cbb5b36692feb"} Nov 25 09:25:24 crc kubenswrapper[4687]: I1125 09:25:24.151414 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.15139287 podStartE2EDuration="3.15139287s" podCreationTimestamp="2025-11-25 09:25:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:25:24.147073163 +0000 UTC m=+1319.200712911" watchObservedRunningTime="2025-11-25 09:25:24.15139287 +0000 UTC m=+1319.205032588" Nov 25 09:25:25 crc kubenswrapper[4687]: I1125 09:25:25.747782 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 09:25:25 crc kubenswrapper[4687]: I1125 09:25:25.748064 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 09:25:26 crc kubenswrapper[4687]: I1125 09:25:26.053652 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 25 09:25:26 crc kubenswrapper[4687]: I1125 09:25:26.388148 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 09:25:26 crc kubenswrapper[4687]: I1125 09:25:26.388766 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="1c8dccfc-e66b-4a60-b0c4-d7c94652dd12" containerName="kube-state-metrics" containerID="cri-o://063ccf89159c0cd4d2cf70e1bd2fd09923dd1d73727983993e2764b5914f3543" gracePeriod=30 Nov 25 09:25:26 crc kubenswrapper[4687]: I1125 09:25:26.747846 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="663335a7-77d3-4114-b2c4-7deb73b7e11d" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.188:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 09:25:26 crc kubenswrapper[4687]: I1125 09:25:26.747878 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="663335a7-77d3-4114-b2c4-7deb73b7e11d" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.188:8775/\": net/http: request canceled (Client.Timeout exceeded while 
awaiting headers)" Nov 25 09:25:27 crc kubenswrapper[4687]: I1125 09:25:27.150252 4687 generic.go:334] "Generic (PLEG): container finished" podID="1c8dccfc-e66b-4a60-b0c4-d7c94652dd12" containerID="063ccf89159c0cd4d2cf70e1bd2fd09923dd1d73727983993e2764b5914f3543" exitCode=2 Nov 25 09:25:27 crc kubenswrapper[4687]: I1125 09:25:27.150297 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1c8dccfc-e66b-4a60-b0c4-d7c94652dd12","Type":"ContainerDied","Data":"063ccf89159c0cd4d2cf70e1bd2fd09923dd1d73727983993e2764b5914f3543"} Nov 25 09:25:28 crc kubenswrapper[4687]: I1125 09:25:28.448268 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 25 09:25:28 crc kubenswrapper[4687]: I1125 09:25:28.856166 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/kube-state-metrics-0" podUID="1c8dccfc-e66b-4a60-b0c4-d7c94652dd12" containerName="kube-state-metrics" probeResult="failure" output="Get \"http://10.217.0.103:8081/readyz\": dial tcp 10.217.0.103:8081: connect: connection refused" Nov 25 09:25:29 crc kubenswrapper[4687]: I1125 09:25:29.530386 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 09:25:29 crc kubenswrapper[4687]: I1125 09:25:29.708444 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-grtzq\" (UniqueName: \"kubernetes.io/projected/1c8dccfc-e66b-4a60-b0c4-d7c94652dd12-kube-api-access-grtzq\") pod \"1c8dccfc-e66b-4a60-b0c4-d7c94652dd12\" (UID: \"1c8dccfc-e66b-4a60-b0c4-d7c94652dd12\") " Nov 25 09:25:29 crc kubenswrapper[4687]: I1125 09:25:29.714149 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c8dccfc-e66b-4a60-b0c4-d7c94652dd12-kube-api-access-grtzq" (OuterVolumeSpecName: "kube-api-access-grtzq") pod "1c8dccfc-e66b-4a60-b0c4-d7c94652dd12" (UID: "1c8dccfc-e66b-4a60-b0c4-d7c94652dd12"). InnerVolumeSpecName "kube-api-access-grtzq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:25:29 crc kubenswrapper[4687]: I1125 09:25:29.811099 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-grtzq\" (UniqueName: \"kubernetes.io/projected/1c8dccfc-e66b-4a60-b0c4-d7c94652dd12-kube-api-access-grtzq\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.181678 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1c8dccfc-e66b-4a60-b0c4-d7c94652dd12","Type":"ContainerDied","Data":"b8e8afda35f66bfdd2004769a55297e167b9878cf39ea5d7824fb4222e519e92"} Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.181756 4687 scope.go:117] "RemoveContainer" containerID="063ccf89159c0cd4d2cf70e1bd2fd09923dd1d73727983993e2764b5914f3543" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.181757 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.231680 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.260587 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.273145 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 09:25:30 crc kubenswrapper[4687]: E1125 09:25:30.273619 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c8dccfc-e66b-4a60-b0c4-d7c94652dd12" containerName="kube-state-metrics" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.273641 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c8dccfc-e66b-4a60-b0c4-d7c94652dd12" containerName="kube-state-metrics" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.273914 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c8dccfc-e66b-4a60-b0c4-d7c94652dd12" containerName="kube-state-metrics" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.274677 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.277373 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.277889 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.278895 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.320012 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dphgn\" (UniqueName: \"kubernetes.io/projected/20082470-4513-4042-8a66-3117b8a387f4-kube-api-access-dphgn\") pod \"kube-state-metrics-0\" (UID: \"20082470-4513-4042-8a66-3117b8a387f4\") " pod="openstack/kube-state-metrics-0" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.320236 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20082470-4513-4042-8a66-3117b8a387f4-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"20082470-4513-4042-8a66-3117b8a387f4\") " pod="openstack/kube-state-metrics-0" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.320360 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/20082470-4513-4042-8a66-3117b8a387f4-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"20082470-4513-4042-8a66-3117b8a387f4\") " pod="openstack/kube-state-metrics-0" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.320467 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/20082470-4513-4042-8a66-3117b8a387f4-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"20082470-4513-4042-8a66-3117b8a387f4\") " pod="openstack/kube-state-metrics-0" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.422684 4687 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-dphgn\" (UniqueName: \"kubernetes.io/projected/20082470-4513-4042-8a66-3117b8a387f4-kube-api-access-dphgn\") pod \"kube-state-metrics-0\" (UID: \"20082470-4513-4042-8a66-3117b8a387f4\") " pod="openstack/kube-state-metrics-0" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.422770 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20082470-4513-4042-8a66-3117b8a387f4-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"20082470-4513-4042-8a66-3117b8a387f4\") " pod="openstack/kube-state-metrics-0" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.422817 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/20082470-4513-4042-8a66-3117b8a387f4-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"20082470-4513-4042-8a66-3117b8a387f4\") " pod="openstack/kube-state-metrics-0" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.422874 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/20082470-4513-4042-8a66-3117b8a387f4-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"20082470-4513-4042-8a66-3117b8a387f4\") " pod="openstack/kube-state-metrics-0" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.428327 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/20082470-4513-4042-8a66-3117b8a387f4-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"20082470-4513-4042-8a66-3117b8a387f4\") " pod="openstack/kube-state-metrics-0" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.428465 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/20082470-4513-4042-8a66-3117b8a387f4-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"20082470-4513-4042-8a66-3117b8a387f4\") " pod="openstack/kube-state-metrics-0" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.438295 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20082470-4513-4042-8a66-3117b8a387f4-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"20082470-4513-4042-8a66-3117b8a387f4\") " pod="openstack/kube-state-metrics-0" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.441440 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dphgn\" (UniqueName: \"kubernetes.io/projected/20082470-4513-4042-8a66-3117b8a387f4-kube-api-access-dphgn\") pod \"kube-state-metrics-0\" (UID: \"20082470-4513-4042-8a66-3117b8a387f4\") " pod="openstack/kube-state-metrics-0" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.592236 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.878299 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.879093 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="88fce615-5e48-4041-b9b3-409bf6a29166" containerName="ceilometer-central-agent" containerID="cri-o://f6289cd9775038ba529d1b864f27176b6c934b0cf7617233a23f14d9df43048b" gracePeriod=30 Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.879143 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="88fce615-5e48-4041-b9b3-409bf6a29166" containerName="sg-core" containerID="cri-o://c436591ce9a835b093718a535f6cb9e6c2388018bddcde9122c79b2d950b52b2" gracePeriod=30 Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.879189 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="88fce615-5e48-4041-b9b3-409bf6a29166" containerName="ceilometer-notification-agent" containerID="cri-o://c900ffadeb11cde8958d08f0543cb428a3a85902d5e6b33a329f0d018c714155" gracePeriod=30 Nov 25 09:25:30 crc kubenswrapper[4687]: I1125 09:25:30.879123 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="88fce615-5e48-4041-b9b3-409bf6a29166" containerName="proxy-httpd" containerID="cri-o://8d2038133110eacd56fe1fc055ab94cfeb15af8891520ec151fc2c897c7eda39" gracePeriod=30 Nov 25 09:25:31 crc kubenswrapper[4687]: I1125 09:25:31.054023 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 25 09:25:31 crc kubenswrapper[4687]: I1125 09:25:31.072036 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 09:25:31 crc kubenswrapper[4687]: I1125 09:25:31.113276 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 25 09:25:31 crc kubenswrapper[4687]: I1125 09:25:31.193814 4687 generic.go:334] "Generic (PLEG): container finished" podID="88fce615-5e48-4041-b9b3-409bf6a29166" containerID="8d2038133110eacd56fe1fc055ab94cfeb15af8891520ec151fc2c897c7eda39" exitCode=0 Nov 25 09:25:31 crc kubenswrapper[4687]: I1125 09:25:31.193848 4687 generic.go:334] "Generic (PLEG): container finished" podID="88fce615-5e48-4041-b9b3-409bf6a29166" containerID="c436591ce9a835b093718a535f6cb9e6c2388018bddcde9122c79b2d950b52b2" exitCode=2 Nov 25 09:25:31 crc kubenswrapper[4687]: I1125 09:25:31.193930 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"88fce615-5e48-4041-b9b3-409bf6a29166","Type":"ContainerDied","Data":"8d2038133110eacd56fe1fc055ab94cfeb15af8891520ec151fc2c897c7eda39"} Nov 25 09:25:31 crc kubenswrapper[4687]: I1125 09:25:31.193997 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"88fce615-5e48-4041-b9b3-409bf6a29166","Type":"ContainerDied","Data":"c436591ce9a835b093718a535f6cb9e6c2388018bddcde9122c79b2d950b52b2"} Nov 25 09:25:31 crc kubenswrapper[4687]: I1125 09:25:31.195252 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"20082470-4513-4042-8a66-3117b8a387f4","Type":"ContainerStarted","Data":"56ec389d794a5bea6f977cb9f0565ec79392532a660dc99980c16f115ac9dbc9"} Nov 25 09:25:31 crc 
kubenswrapper[4687]: I1125 09:25:31.232128 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 25 09:25:31 crc kubenswrapper[4687]: I1125 09:25:31.481680 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 09:25:31 crc kubenswrapper[4687]: I1125 09:25:31.481751 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 09:25:31 crc kubenswrapper[4687]: E1125 09:25:31.589148 4687 fsHandler.go:119] failed to collect filesystem stats - rootDiskErr: could not stat "/var/lib/containers/storage/overlay/af23f4edfdffeb0d2f42f8f23f18340c20b326f59c473c06c3aa2872aaac58d6/diff" to get inode usage: stat /var/lib/containers/storage/overlay/af23f4edfdffeb0d2f42f8f23f18340c20b326f59c473c06c3aa2872aaac58d6/diff: no such file or directory, extraDiskErr: could not stat "/var/log/pods/openstack_kube-state-metrics-0_1c8dccfc-e66b-4a60-b0c4-d7c94652dd12/kube-state-metrics/0.log" to get inode usage: stat /var/log/pods/openstack_kube-state-metrics-0_1c8dccfc-e66b-4a60-b0c4-d7c94652dd12/kube-state-metrics/0.log: no such file or directory Nov 25 09:25:31 crc kubenswrapper[4687]: I1125 09:25:31.745692 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c8dccfc-e66b-4a60-b0c4-d7c94652dd12" path="/var/lib/kubelet/pods/1c8dccfc-e66b-4a60-b0c4-d7c94652dd12/volumes" Nov 25 09:25:32 crc kubenswrapper[4687]: I1125 09:25:32.207209 4687 generic.go:334] "Generic (PLEG): container finished" podID="88fce615-5e48-4041-b9b3-409bf6a29166" containerID="f6289cd9775038ba529d1b864f27176b6c934b0cf7617233a23f14d9df43048b" exitCode=0 Nov 25 09:25:32 crc kubenswrapper[4687]: I1125 09:25:32.207267 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"88fce615-5e48-4041-b9b3-409bf6a29166","Type":"ContainerDied","Data":"f6289cd9775038ba529d1b864f27176b6c934b0cf7617233a23f14d9df43048b"} Nov 25 09:25:32 crc kubenswrapper[4687]: I1125 09:25:32.564702 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e41b4fe4-f7e3-4321-8d63-555a08900ab1" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.191:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 09:25:32 crc kubenswrapper[4687]: I1125 09:25:32.564830 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e41b4fe4-f7e3-4321-8d63-555a08900ab1" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.191:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 09:25:35 crc kubenswrapper[4687]: I1125 09:25:35.746405 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 09:25:35 crc kubenswrapper[4687]: I1125 09:25:35.746996 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 09:25:35 crc kubenswrapper[4687]: I1125 09:25:35.751560 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 09:25:35 crc kubenswrapper[4687]: I1125 09:25:35.752173 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 09:25:36 crc kubenswrapper[4687]: I1125 09:25:36.254003 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" 
event={"ID":"20082470-4513-4042-8a66-3117b8a387f4","Type":"ContainerStarted","Data":"c7a32bb9dec635b0e43730fd5ac460cf5711dbae853f291719be8f04b517c87b"} Nov 25 09:25:36 crc kubenswrapper[4687]: I1125 09:25:36.254443 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 09:25:36 crc kubenswrapper[4687]: I1125 09:25:36.273530 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.212436203 podStartE2EDuration="6.273490357s" podCreationTimestamp="2025-11-25 09:25:30 +0000 UTC" firstStartedPulling="2025-11-25 09:25:31.098909116 +0000 UTC m=+1326.152548834" lastFinishedPulling="2025-11-25 09:25:35.15996327 +0000 UTC m=+1330.213602988" observedRunningTime="2025-11-25 09:25:36.270817704 +0000 UTC m=+1331.324457432" watchObservedRunningTime="2025-11-25 09:25:36.273490357 +0000 UTC m=+1331.327130075" Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.264219 4687 generic.go:334] "Generic (PLEG): container finished" podID="88fce615-5e48-4041-b9b3-409bf6a29166" containerID="c900ffadeb11cde8958d08f0543cb428a3a85902d5e6b33a329f0d018c714155" exitCode=0 Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.264290 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"88fce615-5e48-4041-b9b3-409bf6a29166","Type":"ContainerDied","Data":"c900ffadeb11cde8958d08f0543cb428a3a85902d5e6b33a329f0d018c714155"} Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.458676 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.572958 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-scripts\") pod \"88fce615-5e48-4041-b9b3-409bf6a29166\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.573063 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/88fce615-5e48-4041-b9b3-409bf6a29166-run-httpd\") pod \"88fce615-5e48-4041-b9b3-409bf6a29166\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.573182 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-config-data\") pod \"88fce615-5e48-4041-b9b3-409bf6a29166\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.573274 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/88fce615-5e48-4041-b9b3-409bf6a29166-log-httpd\") pod \"88fce615-5e48-4041-b9b3-409bf6a29166\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.573297 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-sg-core-conf-yaml\") pod \"88fce615-5e48-4041-b9b3-409bf6a29166\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.573330 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-combined-ca-bundle\") pod \"88fce615-5e48-4041-b9b3-409bf6a29166\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.573356 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4z5vb\" (UniqueName: \"kubernetes.io/projected/88fce615-5e48-4041-b9b3-409bf6a29166-kube-api-access-4z5vb\") pod \"88fce615-5e48-4041-b9b3-409bf6a29166\" (UID: \"88fce615-5e48-4041-b9b3-409bf6a29166\") " Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.573849 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88fce615-5e48-4041-b9b3-409bf6a29166-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "88fce615-5e48-4041-b9b3-409bf6a29166" (UID: "88fce615-5e48-4041-b9b3-409bf6a29166"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.574148 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88fce615-5e48-4041-b9b3-409bf6a29166-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "88fce615-5e48-4041-b9b3-409bf6a29166" (UID: "88fce615-5e48-4041-b9b3-409bf6a29166"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.578923 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88fce615-5e48-4041-b9b3-409bf6a29166-kube-api-access-4z5vb" (OuterVolumeSpecName: "kube-api-access-4z5vb") pod "88fce615-5e48-4041-b9b3-409bf6a29166" (UID: "88fce615-5e48-4041-b9b3-409bf6a29166"). InnerVolumeSpecName "kube-api-access-4z5vb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.579699 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-scripts" (OuterVolumeSpecName: "scripts") pod "88fce615-5e48-4041-b9b3-409bf6a29166" (UID: "88fce615-5e48-4041-b9b3-409bf6a29166"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.612844 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "88fce615-5e48-4041-b9b3-409bf6a29166" (UID: "88fce615-5e48-4041-b9b3-409bf6a29166"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.663228 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "88fce615-5e48-4041-b9b3-409bf6a29166" (UID: "88fce615-5e48-4041-b9b3-409bf6a29166"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.665981 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-config-data" (OuterVolumeSpecName: "config-data") pod "88fce615-5e48-4041-b9b3-409bf6a29166" (UID: "88fce615-5e48-4041-b9b3-409bf6a29166"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.675161 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.675190 4687 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/88fce615-5e48-4041-b9b3-409bf6a29166-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.675198 4687 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.675208 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.675217 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4z5vb\" (UniqueName: \"kubernetes.io/projected/88fce615-5e48-4041-b9b3-409bf6a29166-kube-api-access-4z5vb\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.675225 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88fce615-5e48-4041-b9b3-409bf6a29166-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:37 crc kubenswrapper[4687]: I1125 09:25:37.675235 4687 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/88fce615-5e48-4041-b9b3-409bf6a29166-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.226229 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.277204 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"88fce615-5e48-4041-b9b3-409bf6a29166","Type":"ContainerDied","Data":"26fad4ea2cbcf1d64323df8227206fa2cb699cb4994cc904dc5716976b65343c"} Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.277268 4687 scope.go:117] "RemoveContainer" containerID="8d2038133110eacd56fe1fc055ab94cfeb15af8891520ec151fc2c897c7eda39" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.277288 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.279463 4687 generic.go:334] "Generic (PLEG): container finished" podID="8e42ba40-1de7-4098-84a1-fe6673aedecf" containerID="d2ba9b3228f4f599555b330eb247ddcc9a78ca0a7b1beb271726a9777a4c8a81" exitCode=137 Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.279518 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"8e42ba40-1de7-4098-84a1-fe6673aedecf","Type":"ContainerDied","Data":"d2ba9b3228f4f599555b330eb247ddcc9a78ca0a7b1beb271726a9777a4c8a81"} Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.279545 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"8e42ba40-1de7-4098-84a1-fe6673aedecf","Type":"ContainerDied","Data":"be2f5b3664d86844373272da0c889dd8e26ce7c914bd07d5997c0ba8eb105515"} Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.279591 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.304591 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.318194 4687 scope.go:117] "RemoveContainer" containerID="c436591ce9a835b093718a535f6cb9e6c2388018bddcde9122c79b2d950b52b2" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.325431 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.336113 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:25:38 crc kubenswrapper[4687]: E1125 09:25:38.336519 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88fce615-5e48-4041-b9b3-409bf6a29166" containerName="proxy-httpd" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.336536 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="88fce615-5e48-4041-b9b3-409bf6a29166" containerName="proxy-httpd" Nov 25 09:25:38 crc kubenswrapper[4687]: E1125 09:25:38.336560 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88fce615-5e48-4041-b9b3-409bf6a29166" containerName="sg-core" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.336566 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="88fce615-5e48-4041-b9b3-409bf6a29166" containerName="sg-core" Nov 25 09:25:38 crc kubenswrapper[4687]: E1125 09:25:38.336581 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e42ba40-1de7-4098-84a1-fe6673aedecf" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.336587 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e42ba40-1de7-4098-84a1-fe6673aedecf" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 09:25:38 crc kubenswrapper[4687]: E1125 09:25:38.336598 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88fce615-5e48-4041-b9b3-409bf6a29166" containerName="ceilometer-notification-agent" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.336604 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="88fce615-5e48-4041-b9b3-409bf6a29166" containerName="ceilometer-notification-agent" Nov 25 09:25:38 crc kubenswrapper[4687]: E1125 09:25:38.336616 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88fce615-5e48-4041-b9b3-409bf6a29166" 
containerName="ceilometer-central-agent" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.336622 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="88fce615-5e48-4041-b9b3-409bf6a29166" containerName="ceilometer-central-agent" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.336806 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="88fce615-5e48-4041-b9b3-409bf6a29166" containerName="ceilometer-notification-agent" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.336827 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="88fce615-5e48-4041-b9b3-409bf6a29166" containerName="ceilometer-central-agent" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.336838 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e42ba40-1de7-4098-84a1-fe6673aedecf" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.336850 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="88fce615-5e48-4041-b9b3-409bf6a29166" containerName="sg-core" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.336859 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="88fce615-5e48-4041-b9b3-409bf6a29166" containerName="proxy-httpd" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.338639 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.341999 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.342237 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.342427 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.344361 4687 scope.go:117] "RemoveContainer" containerID="c900ffadeb11cde8958d08f0543cb428a3a85902d5e6b33a329f0d018c714155" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.354364 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.372666 4687 scope.go:117] "RemoveContainer" containerID="f6289cd9775038ba529d1b864f27176b6c934b0cf7617233a23f14d9df43048b" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.387055 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-545zc\" (UniqueName: \"kubernetes.io/projected/8e42ba40-1de7-4098-84a1-fe6673aedecf-kube-api-access-545zc\") pod \"8e42ba40-1de7-4098-84a1-fe6673aedecf\" (UID: \"8e42ba40-1de7-4098-84a1-fe6673aedecf\") " Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.387118 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e42ba40-1de7-4098-84a1-fe6673aedecf-config-data\") pod \"8e42ba40-1de7-4098-84a1-fe6673aedecf\" (UID: \"8e42ba40-1de7-4098-84a1-fe6673aedecf\") " Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.387726 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e42ba40-1de7-4098-84a1-fe6673aedecf-combined-ca-bundle\") pod \"8e42ba40-1de7-4098-84a1-fe6673aedecf\" (UID: \"8e42ba40-1de7-4098-84a1-fe6673aedecf\") " Nov 25 09:25:38 crc 
kubenswrapper[4687]: I1125 09:25:38.387864 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.387981 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.388029 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b778b1c4-f189-4082-b43f-8f057fc03ebf-run-httpd\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.388056 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b778b1c4-f189-4082-b43f-8f057fc03ebf-log-httpd\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.388078 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.388092 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qn7d\" (UniqueName: \"kubernetes.io/projected/b778b1c4-f189-4082-b43f-8f057fc03ebf-kube-api-access-8qn7d\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.388120 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-config-data\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.388149 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-scripts\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.391928 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e42ba40-1de7-4098-84a1-fe6673aedecf-kube-api-access-545zc" (OuterVolumeSpecName: "kube-api-access-545zc") pod "8e42ba40-1de7-4098-84a1-fe6673aedecf" (UID: "8e42ba40-1de7-4098-84a1-fe6673aedecf"). InnerVolumeSpecName "kube-api-access-545zc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.393247 4687 scope.go:117] "RemoveContainer" containerID="d2ba9b3228f4f599555b330eb247ddcc9a78ca0a7b1beb271726a9777a4c8a81" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.416879 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e42ba40-1de7-4098-84a1-fe6673aedecf-config-data" (OuterVolumeSpecName: "config-data") pod "8e42ba40-1de7-4098-84a1-fe6673aedecf" (UID: "8e42ba40-1de7-4098-84a1-fe6673aedecf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.429582 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e42ba40-1de7-4098-84a1-fe6673aedecf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8e42ba40-1de7-4098-84a1-fe6673aedecf" (UID: "8e42ba40-1de7-4098-84a1-fe6673aedecf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.489514 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.489586 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b778b1c4-f189-4082-b43f-8f057fc03ebf-run-httpd\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.489627 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b778b1c4-f189-4082-b43f-8f057fc03ebf-log-httpd\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.489651 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.489667 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qn7d\" (UniqueName: \"kubernetes.io/projected/b778b1c4-f189-4082-b43f-8f057fc03ebf-kube-api-access-8qn7d\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.489694 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-config-data\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.489724 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-scripts\") pod \"ceilometer-0\" (UID: 
\"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.489743 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.489826 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e42ba40-1de7-4098-84a1-fe6673aedecf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.489838 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-545zc\" (UniqueName: \"kubernetes.io/projected/8e42ba40-1de7-4098-84a1-fe6673aedecf-kube-api-access-545zc\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.489849 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e42ba40-1de7-4098-84a1-fe6673aedecf-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.490450 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b778b1c4-f189-4082-b43f-8f057fc03ebf-run-httpd\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.491912 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b778b1c4-f189-4082-b43f-8f057fc03ebf-log-httpd\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.492144 4687 scope.go:117] "RemoveContainer" containerID="d2ba9b3228f4f599555b330eb247ddcc9a78ca0a7b1beb271726a9777a4c8a81" Nov 25 09:25:38 crc kubenswrapper[4687]: E1125 09:25:38.492641 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2ba9b3228f4f599555b330eb247ddcc9a78ca0a7b1beb271726a9777a4c8a81\": container with ID starting with d2ba9b3228f4f599555b330eb247ddcc9a78ca0a7b1beb271726a9777a4c8a81 not found: ID does not exist" containerID="d2ba9b3228f4f599555b330eb247ddcc9a78ca0a7b1beb271726a9777a4c8a81" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.492688 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2ba9b3228f4f599555b330eb247ddcc9a78ca0a7b1beb271726a9777a4c8a81"} err="failed to get container status \"d2ba9b3228f4f599555b330eb247ddcc9a78ca0a7b1beb271726a9777a4c8a81\": rpc error: code = NotFound desc = could not find container \"d2ba9b3228f4f599555b330eb247ddcc9a78ca0a7b1beb271726a9777a4c8a81\": container with ID starting with d2ba9b3228f4f599555b330eb247ddcc9a78ca0a7b1beb271726a9777a4c8a81 not found: ID does not exist" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.494144 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: 
I1125 09:25:38.494170 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.495145 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-scripts\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.495323 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-config-data\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.498819 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.506700 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qn7d\" (UniqueName: \"kubernetes.io/projected/b778b1c4-f189-4082-b43f-8f057fc03ebf-kube-api-access-8qn7d\") pod \"ceilometer-0\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.653349 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.669616 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.697020 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.726476 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.727861 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.730311 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.730310 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.730472 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.735833 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.793549 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/059b7fdf-9ca4-4f03-afa0-ee554a6aa858-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"059b7fdf-9ca4-4f03-afa0-ee554a6aa858\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.793631 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qjnxn\" (UniqueName: \"kubernetes.io/projected/059b7fdf-9ca4-4f03-afa0-ee554a6aa858-kube-api-access-qjnxn\") pod \"nova-cell1-novncproxy-0\" (UID: \"059b7fdf-9ca4-4f03-afa0-ee554a6aa858\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.793661 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/059b7fdf-9ca4-4f03-afa0-ee554a6aa858-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"059b7fdf-9ca4-4f03-afa0-ee554a6aa858\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.793683 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/059b7fdf-9ca4-4f03-afa0-ee554a6aa858-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"059b7fdf-9ca4-4f03-afa0-ee554a6aa858\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.793717 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/059b7fdf-9ca4-4f03-afa0-ee554a6aa858-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"059b7fdf-9ca4-4f03-afa0-ee554a6aa858\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.895835 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/059b7fdf-9ca4-4f03-afa0-ee554a6aa858-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"059b7fdf-9ca4-4f03-afa0-ee554a6aa858\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.895891 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qjnxn\" (UniqueName: \"kubernetes.io/projected/059b7fdf-9ca4-4f03-afa0-ee554a6aa858-kube-api-access-qjnxn\") pod \"nova-cell1-novncproxy-0\" (UID: \"059b7fdf-9ca4-4f03-afa0-ee554a6aa858\") " 
pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.895937 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/059b7fdf-9ca4-4f03-afa0-ee554a6aa858-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"059b7fdf-9ca4-4f03-afa0-ee554a6aa858\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.895961 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/059b7fdf-9ca4-4f03-afa0-ee554a6aa858-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"059b7fdf-9ca4-4f03-afa0-ee554a6aa858\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.896008 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/059b7fdf-9ca4-4f03-afa0-ee554a6aa858-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"059b7fdf-9ca4-4f03-afa0-ee554a6aa858\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.900616 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/059b7fdf-9ca4-4f03-afa0-ee554a6aa858-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"059b7fdf-9ca4-4f03-afa0-ee554a6aa858\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.900712 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/059b7fdf-9ca4-4f03-afa0-ee554a6aa858-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"059b7fdf-9ca4-4f03-afa0-ee554a6aa858\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.900809 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/059b7fdf-9ca4-4f03-afa0-ee554a6aa858-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"059b7fdf-9ca4-4f03-afa0-ee554a6aa858\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.901386 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/059b7fdf-9ca4-4f03-afa0-ee554a6aa858-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"059b7fdf-9ca4-4f03-afa0-ee554a6aa858\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:38 crc kubenswrapper[4687]: I1125 09:25:38.912412 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qjnxn\" (UniqueName: \"kubernetes.io/projected/059b7fdf-9ca4-4f03-afa0-ee554a6aa858-kube-api-access-qjnxn\") pod \"nova-cell1-novncproxy-0\" (UID: \"059b7fdf-9ca4-4f03-afa0-ee554a6aa858\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:39 crc kubenswrapper[4687]: I1125 09:25:39.055185 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:39 crc kubenswrapper[4687]: I1125 09:25:39.122163 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:25:39 crc kubenswrapper[4687]: W1125 09:25:39.151608 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb778b1c4_f189_4082_b43f_8f057fc03ebf.slice/crio-d8880ed9a575b434fc40fa51698016908da9d454aca4a56d01aa766135931646 WatchSource:0}: Error finding container d8880ed9a575b434fc40fa51698016908da9d454aca4a56d01aa766135931646: Status 404 returned error can't find the container with id d8880ed9a575b434fc40fa51698016908da9d454aca4a56d01aa766135931646 Nov 25 09:25:39 crc kubenswrapper[4687]: I1125 09:25:39.290882 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b778b1c4-f189-4082-b43f-8f057fc03ebf","Type":"ContainerStarted","Data":"d8880ed9a575b434fc40fa51698016908da9d454aca4a56d01aa766135931646"} Nov 25 09:25:39 crc kubenswrapper[4687]: I1125 09:25:39.491302 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 09:25:39 crc kubenswrapper[4687]: W1125 09:25:39.495376 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod059b7fdf_9ca4_4f03_afa0_ee554a6aa858.slice/crio-6f6ee4e13d89830a139c3f182f3b0196e4d9a8544d1fc38ac6e50a6d9c185b00 WatchSource:0}: Error finding container 6f6ee4e13d89830a139c3f182f3b0196e4d9a8544d1fc38ac6e50a6d9c185b00: Status 404 returned error can't find the container with id 6f6ee4e13d89830a139c3f182f3b0196e4d9a8544d1fc38ac6e50a6d9c185b00 Nov 25 09:25:39 crc kubenswrapper[4687]: I1125 09:25:39.748771 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88fce615-5e48-4041-b9b3-409bf6a29166" path="/var/lib/kubelet/pods/88fce615-5e48-4041-b9b3-409bf6a29166/volumes" Nov 25 09:25:39 crc kubenswrapper[4687]: I1125 09:25:39.750654 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e42ba40-1de7-4098-84a1-fe6673aedecf" path="/var/lib/kubelet/pods/8e42ba40-1de7-4098-84a1-fe6673aedecf/volumes" Nov 25 09:25:40 crc kubenswrapper[4687]: I1125 09:25:40.301842 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"059b7fdf-9ca4-4f03-afa0-ee554a6aa858","Type":"ContainerStarted","Data":"e6bf87a9093c77ce8c792423b4ef974cdba6efe9f65416f62554414c3a033adf"} Nov 25 09:25:40 crc kubenswrapper[4687]: I1125 09:25:40.302159 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"059b7fdf-9ca4-4f03-afa0-ee554a6aa858","Type":"ContainerStarted","Data":"6f6ee4e13d89830a139c3f182f3b0196e4d9a8544d1fc38ac6e50a6d9c185b00"} Nov 25 09:25:40 crc kubenswrapper[4687]: I1125 09:25:40.303710 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b778b1c4-f189-4082-b43f-8f057fc03ebf","Type":"ContainerStarted","Data":"c323e0335bdc8cc85ae120715c38eb42ab847bb07a6911e8580b3aa3667f3dd5"} Nov 25 09:25:40 crc kubenswrapper[4687]: I1125 09:25:40.318999 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.318978387 podStartE2EDuration="2.318978387s" podCreationTimestamp="2025-11-25 09:25:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 
00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:25:40.318975276 +0000 UTC m=+1335.372614994" watchObservedRunningTime="2025-11-25 09:25:40.318978387 +0000 UTC m=+1335.372618115" Nov 25 09:25:40 crc kubenswrapper[4687]: I1125 09:25:40.623096 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 09:25:41 crc kubenswrapper[4687]: I1125 09:25:41.318624 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b778b1c4-f189-4082-b43f-8f057fc03ebf","Type":"ContainerStarted","Data":"a91e3186135e5e51997d0672de3cd5fd1e92d145fdf82a0df09311d621a93285"} Nov 25 09:25:41 crc kubenswrapper[4687]: I1125 09:25:41.485083 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 09:25:41 crc kubenswrapper[4687]: I1125 09:25:41.485416 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 09:25:41 crc kubenswrapper[4687]: I1125 09:25:41.488823 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 09:25:41 crc kubenswrapper[4687]: I1125 09:25:41.493610 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.328791 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b778b1c4-f189-4082-b43f-8f057fc03ebf","Type":"ContainerStarted","Data":"535225d86c302950e643cf2bcf942c3b6720f0b47aedc164b4d4599e0bf3ced8"} Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.329459 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.334072 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.490822 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-nng4f"] Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.494081 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.519980 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-nng4f"] Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.596102 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nk89q\" (UniqueName: \"kubernetes.io/projected/52d224c6-fc25-45ce-bb59-9d91bf05df17-kube-api-access-nk89q\") pod \"dnsmasq-dns-cd5cbd7b9-nng4f\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.596226 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-nng4f\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.596276 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-config\") pod \"dnsmasq-dns-cd5cbd7b9-nng4f\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.596304 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-nng4f\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.596333 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-nng4f\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.596369 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-nng4f\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.698671 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-nng4f\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.699071 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-config\") pod \"dnsmasq-dns-cd5cbd7b9-nng4f\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.699119 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-nng4f\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.699152 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-nng4f\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.699202 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-nng4f\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.699343 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nk89q\" (UniqueName: \"kubernetes.io/projected/52d224c6-fc25-45ce-bb59-9d91bf05df17-kube-api-access-nk89q\") pod \"dnsmasq-dns-cd5cbd7b9-nng4f\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.700116 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-nng4f\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.700336 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-nng4f\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.700534 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-nng4f\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.700836 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-nng4f\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.702280 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-config\") pod \"dnsmasq-dns-cd5cbd7b9-nng4f\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.735608 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nk89q\" (UniqueName: 
\"kubernetes.io/projected/52d224c6-fc25-45ce-bb59-9d91bf05df17-kube-api-access-nk89q\") pod \"dnsmasq-dns-cd5cbd7b9-nng4f\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:42 crc kubenswrapper[4687]: I1125 09:25:42.819569 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:43 crc kubenswrapper[4687]: I1125 09:25:43.346416 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b778b1c4-f189-4082-b43f-8f057fc03ebf","Type":"ContainerStarted","Data":"74df3188c2ad225cc3c275a17c9d18f27402fe9a6bac0d70003feedeba6c01e8"} Nov 25 09:25:43 crc kubenswrapper[4687]: I1125 09:25:43.374181 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-nng4f"] Nov 25 09:25:43 crc kubenswrapper[4687]: I1125 09:25:43.374390 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.626709252 podStartE2EDuration="5.374380987s" podCreationTimestamp="2025-11-25 09:25:38 +0000 UTC" firstStartedPulling="2025-11-25 09:25:39.156044032 +0000 UTC m=+1334.209683750" lastFinishedPulling="2025-11-25 09:25:42.903715767 +0000 UTC m=+1337.957355485" observedRunningTime="2025-11-25 09:25:43.370277605 +0000 UTC m=+1338.423917323" watchObservedRunningTime="2025-11-25 09:25:43.374380987 +0000 UTC m=+1338.428020705" Nov 25 09:25:43 crc kubenswrapper[4687]: W1125 09:25:43.378091 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod52d224c6_fc25_45ce_bb59_9d91bf05df17.slice/crio-ac53d1d98b7ef2ceebdae38d25cd3973862db2b3491344499de0d9e951664b14 WatchSource:0}: Error finding container ac53d1d98b7ef2ceebdae38d25cd3973862db2b3491344499de0d9e951664b14: Status 404 returned error can't find the container with id ac53d1d98b7ef2ceebdae38d25cd3973862db2b3491344499de0d9e951664b14 Nov 25 09:25:44 crc kubenswrapper[4687]: I1125 09:25:44.055811 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:44 crc kubenswrapper[4687]: I1125 09:25:44.358564 4687 generic.go:334] "Generic (PLEG): container finished" podID="52d224c6-fc25-45ce-bb59-9d91bf05df17" containerID="927a9a3b8d667f984bae7f7cb2cd2dba24186e188846298950bfafdc783bd64f" exitCode=0 Nov 25 09:25:44 crc kubenswrapper[4687]: I1125 09:25:44.358746 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" event={"ID":"52d224c6-fc25-45ce-bb59-9d91bf05df17","Type":"ContainerDied","Data":"927a9a3b8d667f984bae7f7cb2cd2dba24186e188846298950bfafdc783bd64f"} Nov 25 09:25:44 crc kubenswrapper[4687]: I1125 09:25:44.358826 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" event={"ID":"52d224c6-fc25-45ce-bb59-9d91bf05df17","Type":"ContainerStarted","Data":"ac53d1d98b7ef2ceebdae38d25cd3973862db2b3491344499de0d9e951664b14"} Nov 25 09:25:44 crc kubenswrapper[4687]: I1125 09:25:44.359126 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 09:25:45 crc kubenswrapper[4687]: I1125 09:25:45.031978 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:25:45 crc kubenswrapper[4687]: I1125 09:25:45.222089 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:25:45 crc 
kubenswrapper[4687]: I1125 09:25:45.368825 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" event={"ID":"52d224c6-fc25-45ce-bb59-9d91bf05df17","Type":"ContainerStarted","Data":"3a4b3a62093040a6d62c070b53efca092d4098caa5f02ab9a007960a259e5813"} Nov 25 09:25:45 crc kubenswrapper[4687]: I1125 09:25:45.369260 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e41b4fe4-f7e3-4321-8d63-555a08900ab1" containerName="nova-api-log" containerID="cri-o://5ef8aba58036a427fc841ff969bc96ff97424e4363b721b54f5f5a0c99cedd91" gracePeriod=30 Nov 25 09:25:45 crc kubenswrapper[4687]: I1125 09:25:45.369359 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e41b4fe4-f7e3-4321-8d63-555a08900ab1" containerName="nova-api-api" containerID="cri-o://899292de27fa70bbe6cb91e6f4d80a85114ed708fedbc5c34a0cbb5b36692feb" gracePeriod=30 Nov 25 09:25:45 crc kubenswrapper[4687]: I1125 09:25:45.404166 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" podStartSLOduration=3.404144178 podStartE2EDuration="3.404144178s" podCreationTimestamp="2025-11-25 09:25:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:25:45.39684978 +0000 UTC m=+1340.450489518" watchObservedRunningTime="2025-11-25 09:25:45.404144178 +0000 UTC m=+1340.457783896" Nov 25 09:25:46 crc kubenswrapper[4687]: I1125 09:25:46.377358 4687 generic.go:334] "Generic (PLEG): container finished" podID="e41b4fe4-f7e3-4321-8d63-555a08900ab1" containerID="5ef8aba58036a427fc841ff969bc96ff97424e4363b721b54f5f5a0c99cedd91" exitCode=143 Nov 25 09:25:46 crc kubenswrapper[4687]: I1125 09:25:46.377466 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e41b4fe4-f7e3-4321-8d63-555a08900ab1","Type":"ContainerDied","Data":"5ef8aba58036a427fc841ff969bc96ff97424e4363b721b54f5f5a0c99cedd91"} Nov 25 09:25:46 crc kubenswrapper[4687]: I1125 09:25:46.378538 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:46 crc kubenswrapper[4687]: I1125 09:25:46.378696 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b778b1c4-f189-4082-b43f-8f057fc03ebf" containerName="ceilometer-central-agent" containerID="cri-o://c323e0335bdc8cc85ae120715c38eb42ab847bb07a6911e8580b3aa3667f3dd5" gracePeriod=30 Nov 25 09:25:46 crc kubenswrapper[4687]: I1125 09:25:46.378718 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b778b1c4-f189-4082-b43f-8f057fc03ebf" containerName="ceilometer-notification-agent" containerID="cri-o://a91e3186135e5e51997d0672de3cd5fd1e92d145fdf82a0df09311d621a93285" gracePeriod=30 Nov 25 09:25:46 crc kubenswrapper[4687]: I1125 09:25:46.378720 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b778b1c4-f189-4082-b43f-8f057fc03ebf" containerName="sg-core" containerID="cri-o://535225d86c302950e643cf2bcf942c3b6720f0b47aedc164b4d4599e0bf3ced8" gracePeriod=30 Nov 25 09:25:46 crc kubenswrapper[4687]: I1125 09:25:46.378827 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b778b1c4-f189-4082-b43f-8f057fc03ebf" 
containerName="proxy-httpd" containerID="cri-o://74df3188c2ad225cc3c275a17c9d18f27402fe9a6bac0d70003feedeba6c01e8" gracePeriod=30 Nov 25 09:25:47 crc kubenswrapper[4687]: I1125 09:25:47.389591 4687 generic.go:334] "Generic (PLEG): container finished" podID="b778b1c4-f189-4082-b43f-8f057fc03ebf" containerID="74df3188c2ad225cc3c275a17c9d18f27402fe9a6bac0d70003feedeba6c01e8" exitCode=0 Nov 25 09:25:47 crc kubenswrapper[4687]: I1125 09:25:47.390656 4687 generic.go:334] "Generic (PLEG): container finished" podID="b778b1c4-f189-4082-b43f-8f057fc03ebf" containerID="535225d86c302950e643cf2bcf942c3b6720f0b47aedc164b4d4599e0bf3ced8" exitCode=2 Nov 25 09:25:47 crc kubenswrapper[4687]: I1125 09:25:47.390746 4687 generic.go:334] "Generic (PLEG): container finished" podID="b778b1c4-f189-4082-b43f-8f057fc03ebf" containerID="a91e3186135e5e51997d0672de3cd5fd1e92d145fdf82a0df09311d621a93285" exitCode=0 Nov 25 09:25:47 crc kubenswrapper[4687]: I1125 09:25:47.389678 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b778b1c4-f189-4082-b43f-8f057fc03ebf","Type":"ContainerDied","Data":"74df3188c2ad225cc3c275a17c9d18f27402fe9a6bac0d70003feedeba6c01e8"} Nov 25 09:25:47 crc kubenswrapper[4687]: I1125 09:25:47.390874 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b778b1c4-f189-4082-b43f-8f057fc03ebf","Type":"ContainerDied","Data":"535225d86c302950e643cf2bcf942c3b6720f0b47aedc164b4d4599e0bf3ced8"} Nov 25 09:25:47 crc kubenswrapper[4687]: I1125 09:25:47.390893 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b778b1c4-f189-4082-b43f-8f057fc03ebf","Type":"ContainerDied","Data":"a91e3186135e5e51997d0672de3cd5fd1e92d145fdf82a0df09311d621a93285"} Nov 25 09:25:48 crc kubenswrapper[4687]: E1125 09:25:48.343889 4687 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb778b1c4_f189_4082_b43f_8f057fc03ebf.slice/crio-c323e0335bdc8cc85ae120715c38eb42ab847bb07a6911e8580b3aa3667f3dd5.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb778b1c4_f189_4082_b43f_8f057fc03ebf.slice/crio-conmon-c323e0335bdc8cc85ae120715c38eb42ab847bb07a6911e8580b3aa3667f3dd5.scope\": RecentStats: unable to find data in memory cache]" Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.413588 4687 generic.go:334] "Generic (PLEG): container finished" podID="b778b1c4-f189-4082-b43f-8f057fc03ebf" containerID="c323e0335bdc8cc85ae120715c38eb42ab847bb07a6911e8580b3aa3667f3dd5" exitCode=0 Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.413639 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b778b1c4-f189-4082-b43f-8f057fc03ebf","Type":"ContainerDied","Data":"c323e0335bdc8cc85ae120715c38eb42ab847bb07a6911e8580b3aa3667f3dd5"} Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.719005 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.827570 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.858754 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-ceilometer-tls-certs\") pod \"b778b1c4-f189-4082-b43f-8f057fc03ebf\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.858822 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-combined-ca-bundle\") pod \"b778b1c4-f189-4082-b43f-8f057fc03ebf\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.858877 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-config-data\") pod \"b778b1c4-f189-4082-b43f-8f057fc03ebf\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.859001 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-scripts\") pod \"b778b1c4-f189-4082-b43f-8f057fc03ebf\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.859071 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8qn7d\" (UniqueName: \"kubernetes.io/projected/b778b1c4-f189-4082-b43f-8f057fc03ebf-kube-api-access-8qn7d\") pod \"b778b1c4-f189-4082-b43f-8f057fc03ebf\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.859101 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-sg-core-conf-yaml\") pod \"b778b1c4-f189-4082-b43f-8f057fc03ebf\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.859165 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b778b1c4-f189-4082-b43f-8f057fc03ebf-run-httpd\") pod \"b778b1c4-f189-4082-b43f-8f057fc03ebf\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.859236 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b778b1c4-f189-4082-b43f-8f057fc03ebf-log-httpd\") pod \"b778b1c4-f189-4082-b43f-8f057fc03ebf\" (UID: \"b778b1c4-f189-4082-b43f-8f057fc03ebf\") " Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.861361 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b778b1c4-f189-4082-b43f-8f057fc03ebf-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b778b1c4-f189-4082-b43f-8f057fc03ebf" (UID: "b778b1c4-f189-4082-b43f-8f057fc03ebf"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.861667 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b778b1c4-f189-4082-b43f-8f057fc03ebf-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b778b1c4-f189-4082-b43f-8f057fc03ebf" (UID: "b778b1c4-f189-4082-b43f-8f057fc03ebf"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.865763 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b778b1c4-f189-4082-b43f-8f057fc03ebf-kube-api-access-8qn7d" (OuterVolumeSpecName: "kube-api-access-8qn7d") pod "b778b1c4-f189-4082-b43f-8f057fc03ebf" (UID: "b778b1c4-f189-4082-b43f-8f057fc03ebf"). InnerVolumeSpecName "kube-api-access-8qn7d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.868766 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-scripts" (OuterVolumeSpecName: "scripts") pod "b778b1c4-f189-4082-b43f-8f057fc03ebf" (UID: "b778b1c4-f189-4082-b43f-8f057fc03ebf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.890714 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b778b1c4-f189-4082-b43f-8f057fc03ebf" (UID: "b778b1c4-f189-4082-b43f-8f057fc03ebf"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.922028 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "b778b1c4-f189-4082-b43f-8f057fc03ebf" (UID: "b778b1c4-f189-4082-b43f-8f057fc03ebf"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.957851 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b778b1c4-f189-4082-b43f-8f057fc03ebf" (UID: "b778b1c4-f189-4082-b43f-8f057fc03ebf"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.961407 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e41b4fe4-f7e3-4321-8d63-555a08900ab1-combined-ca-bundle\") pod \"e41b4fe4-f7e3-4321-8d63-555a08900ab1\" (UID: \"e41b4fe4-f7e3-4321-8d63-555a08900ab1\") " Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.961597 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e41b4fe4-f7e3-4321-8d63-555a08900ab1-logs\") pod \"e41b4fe4-f7e3-4321-8d63-555a08900ab1\" (UID: \"e41b4fe4-f7e3-4321-8d63-555a08900ab1\") " Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.961703 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e41b4fe4-f7e3-4321-8d63-555a08900ab1-config-data\") pod \"e41b4fe4-f7e3-4321-8d63-555a08900ab1\" (UID: \"e41b4fe4-f7e3-4321-8d63-555a08900ab1\") " Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.961767 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fc5nk\" (UniqueName: \"kubernetes.io/projected/e41b4fe4-f7e3-4321-8d63-555a08900ab1-kube-api-access-fc5nk\") pod \"e41b4fe4-f7e3-4321-8d63-555a08900ab1\" (UID: \"e41b4fe4-f7e3-4321-8d63-555a08900ab1\") " Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.962170 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e41b4fe4-f7e3-4321-8d63-555a08900ab1-logs" (OuterVolumeSpecName: "logs") pod "e41b4fe4-f7e3-4321-8d63-555a08900ab1" (UID: "e41b4fe4-f7e3-4321-8d63-555a08900ab1"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.962473 4687 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e41b4fe4-f7e3-4321-8d63-555a08900ab1-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.962517 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.962534 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8qn7d\" (UniqueName: \"kubernetes.io/projected/b778b1c4-f189-4082-b43f-8f057fc03ebf-kube-api-access-8qn7d\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.962552 4687 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.962564 4687 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b778b1c4-f189-4082-b43f-8f057fc03ebf-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.962578 4687 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b778b1c4-f189-4082-b43f-8f057fc03ebf-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.962590 4687 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.962601 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.966755 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e41b4fe4-f7e3-4321-8d63-555a08900ab1-kube-api-access-fc5nk" (OuterVolumeSpecName: "kube-api-access-fc5nk") pod "e41b4fe4-f7e3-4321-8d63-555a08900ab1" (UID: "e41b4fe4-f7e3-4321-8d63-555a08900ab1"). InnerVolumeSpecName "kube-api-access-fc5nk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.987802 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e41b4fe4-f7e3-4321-8d63-555a08900ab1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e41b4fe4-f7e3-4321-8d63-555a08900ab1" (UID: "e41b4fe4-f7e3-4321-8d63-555a08900ab1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:48 crc kubenswrapper[4687]: I1125 09:25:48.991374 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e41b4fe4-f7e3-4321-8d63-555a08900ab1-config-data" (OuterVolumeSpecName: "config-data") pod "e41b4fe4-f7e3-4321-8d63-555a08900ab1" (UID: "e41b4fe4-f7e3-4321-8d63-555a08900ab1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.027716 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-config-data" (OuterVolumeSpecName: "config-data") pod "b778b1c4-f189-4082-b43f-8f057fc03ebf" (UID: "b778b1c4-f189-4082-b43f-8f057fc03ebf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.056233 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.064658 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e41b4fe4-f7e3-4321-8d63-555a08900ab1-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.064822 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fc5nk\" (UniqueName: \"kubernetes.io/projected/e41b4fe4-f7e3-4321-8d63-555a08900ab1-kube-api-access-fc5nk\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.064837 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e41b4fe4-f7e3-4321-8d63-555a08900ab1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.064847 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b778b1c4-f189-4082-b43f-8f057fc03ebf-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.074964 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.435017 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b778b1c4-f189-4082-b43f-8f057fc03ebf","Type":"ContainerDied","Data":"d8880ed9a575b434fc40fa51698016908da9d454aca4a56d01aa766135931646"} Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.435068 4687 scope.go:117] "RemoveContainer" containerID="74df3188c2ad225cc3c275a17c9d18f27402fe9a6bac0d70003feedeba6c01e8" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.435189 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.442993 4687 generic.go:334] "Generic (PLEG): container finished" podID="e41b4fe4-f7e3-4321-8d63-555a08900ab1" containerID="899292de27fa70bbe6cb91e6f4d80a85114ed708fedbc5c34a0cbb5b36692feb" exitCode=0 Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.444622 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.446723 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e41b4fe4-f7e3-4321-8d63-555a08900ab1","Type":"ContainerDied","Data":"899292de27fa70bbe6cb91e6f4d80a85114ed708fedbc5c34a0cbb5b36692feb"} Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.446838 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e41b4fe4-f7e3-4321-8d63-555a08900ab1","Type":"ContainerDied","Data":"5f2fd351fab3d146c7f7178056b3bc33b8726c5bfa9666ad3b00710f26f36c7e"} Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.472772 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.474555 4687 scope.go:117] "RemoveContainer" containerID="535225d86c302950e643cf2bcf942c3b6720f0b47aedc164b4d4599e0bf3ced8" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.486958 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.489623 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.499098 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.501220 4687 scope.go:117] "RemoveContainer" containerID="a91e3186135e5e51997d0672de3cd5fd1e92d145fdf82a0df09311d621a93285" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.510613 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.553261 4687 scope.go:117] "RemoveContainer" containerID="c323e0335bdc8cc85ae120715c38eb42ab847bb07a6911e8580b3aa3667f3dd5" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.555284 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:25:49 crc kubenswrapper[4687]: E1125 09:25:49.555758 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b778b1c4-f189-4082-b43f-8f057fc03ebf" containerName="ceilometer-notification-agent" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.555777 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="b778b1c4-f189-4082-b43f-8f057fc03ebf" containerName="ceilometer-notification-agent" Nov 25 09:25:49 crc kubenswrapper[4687]: E1125 09:25:49.555796 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b778b1c4-f189-4082-b43f-8f057fc03ebf" containerName="proxy-httpd" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.555805 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="b778b1c4-f189-4082-b43f-8f057fc03ebf" containerName="proxy-httpd" Nov 25 09:25:49 crc kubenswrapper[4687]: E1125 09:25:49.555833 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e41b4fe4-f7e3-4321-8d63-555a08900ab1" containerName="nova-api-api" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.555841 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="e41b4fe4-f7e3-4321-8d63-555a08900ab1" containerName="nova-api-api" Nov 25 09:25:49 crc kubenswrapper[4687]: E1125 09:25:49.555860 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e41b4fe4-f7e3-4321-8d63-555a08900ab1" containerName="nova-api-log" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 
09:25:49.555867 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="e41b4fe4-f7e3-4321-8d63-555a08900ab1" containerName="nova-api-log" Nov 25 09:25:49 crc kubenswrapper[4687]: E1125 09:25:49.555887 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b778b1c4-f189-4082-b43f-8f057fc03ebf" containerName="ceilometer-central-agent" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.555897 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="b778b1c4-f189-4082-b43f-8f057fc03ebf" containerName="ceilometer-central-agent" Nov 25 09:25:49 crc kubenswrapper[4687]: E1125 09:25:49.555916 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b778b1c4-f189-4082-b43f-8f057fc03ebf" containerName="sg-core" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.555924 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="b778b1c4-f189-4082-b43f-8f057fc03ebf" containerName="sg-core" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.556139 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="e41b4fe4-f7e3-4321-8d63-555a08900ab1" containerName="nova-api-log" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.556160 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="b778b1c4-f189-4082-b43f-8f057fc03ebf" containerName="proxy-httpd" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.556175 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="b778b1c4-f189-4082-b43f-8f057fc03ebf" containerName="ceilometer-central-agent" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.556190 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="e41b4fe4-f7e3-4321-8d63-555a08900ab1" containerName="nova-api-api" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.556215 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="b778b1c4-f189-4082-b43f-8f057fc03ebf" containerName="ceilometer-notification-agent" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.556234 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="b778b1c4-f189-4082-b43f-8f057fc03ebf" containerName="sg-core" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.561390 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.566074 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.566382 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.566962 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.568994 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.593761 4687 scope.go:117] "RemoveContainer" containerID="899292de27fa70bbe6cb91e6f4d80a85114ed708fedbc5c34a0cbb5b36692feb" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.597160 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.598948 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.601572 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.601813 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.601821 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.640891 4687 scope.go:117] "RemoveContainer" containerID="5ef8aba58036a427fc841ff969bc96ff97424e4363b721b54f5f5a0c99cedd91" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.648181 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.676902 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.676989 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-log-httpd\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.677023 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqlcf\" (UniqueName: \"kubernetes.io/projected/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-kube-api-access-rqlcf\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.677243 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.677538 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-config-data\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.677603 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-run-httpd\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.677665 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 
09:25:49.677734 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-scripts\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.679836 4687 scope.go:117] "RemoveContainer" containerID="899292de27fa70bbe6cb91e6f4d80a85114ed708fedbc5c34a0cbb5b36692feb" Nov 25 09:25:49 crc kubenswrapper[4687]: E1125 09:25:49.681613 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"899292de27fa70bbe6cb91e6f4d80a85114ed708fedbc5c34a0cbb5b36692feb\": container with ID starting with 899292de27fa70bbe6cb91e6f4d80a85114ed708fedbc5c34a0cbb5b36692feb not found: ID does not exist" containerID="899292de27fa70bbe6cb91e6f4d80a85114ed708fedbc5c34a0cbb5b36692feb" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.681666 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"899292de27fa70bbe6cb91e6f4d80a85114ed708fedbc5c34a0cbb5b36692feb"} err="failed to get container status \"899292de27fa70bbe6cb91e6f4d80a85114ed708fedbc5c34a0cbb5b36692feb\": rpc error: code = NotFound desc = could not find container \"899292de27fa70bbe6cb91e6f4d80a85114ed708fedbc5c34a0cbb5b36692feb\": container with ID starting with 899292de27fa70bbe6cb91e6f4d80a85114ed708fedbc5c34a0cbb5b36692feb not found: ID does not exist" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.681700 4687 scope.go:117] "RemoveContainer" containerID="5ef8aba58036a427fc841ff969bc96ff97424e4363b721b54f5f5a0c99cedd91" Nov 25 09:25:49 crc kubenswrapper[4687]: E1125 09:25:49.685003 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ef8aba58036a427fc841ff969bc96ff97424e4363b721b54f5f5a0c99cedd91\": container with ID starting with 5ef8aba58036a427fc841ff969bc96ff97424e4363b721b54f5f5a0c99cedd91 not found: ID does not exist" containerID="5ef8aba58036a427fc841ff969bc96ff97424e4363b721b54f5f5a0c99cedd91" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.685037 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ef8aba58036a427fc841ff969bc96ff97424e4363b721b54f5f5a0c99cedd91"} err="failed to get container status \"5ef8aba58036a427fc841ff969bc96ff97424e4363b721b54f5f5a0c99cedd91\": rpc error: code = NotFound desc = could not find container \"5ef8aba58036a427fc841ff969bc96ff97424e4363b721b54f5f5a0c99cedd91\": container with ID starting with 5ef8aba58036a427fc841ff969bc96ff97424e4363b721b54f5f5a0c99cedd91 not found: ID does not exist" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.694417 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-bsrx2"] Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.696061 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-bsrx2" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.710776 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.719002 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-bsrx2"] Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.731720 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.760268 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b778b1c4-f189-4082-b43f-8f057fc03ebf" path="/var/lib/kubelet/pods/b778b1c4-f189-4082-b43f-8f057fc03ebf/volumes" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.761222 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e41b4fe4-f7e3-4321-8d63-555a08900ab1" path="/var/lib/kubelet/pods/e41b4fe4-f7e3-4321-8d63-555a08900ab1/volumes" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.780046 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cee1023b-62ef-498d-9603-3a01d75f4971-logs\") pod \"nova-api-0\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " pod="openstack/nova-api-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.780116 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-scripts\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.780201 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.780290 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vzsw\" (UniqueName: \"kubernetes.io/projected/cee1023b-62ef-498d-9603-3a01d75f4971-kube-api-access-5vzsw\") pod \"nova-api-0\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " pod="openstack/nova-api-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.780351 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-log-httpd\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.780385 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqlcf\" (UniqueName: \"kubernetes.io/projected/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-kube-api-access-rqlcf\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.780418 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-combined-ca-bundle\") pod \"nova-api-0\" (UID: 
\"cee1023b-62ef-498d-9603-3a01d75f4971\") " pod="openstack/nova-api-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.780444 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " pod="openstack/nova-api-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.780645 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-public-tls-certs\") pod \"nova-api-0\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " pod="openstack/nova-api-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.780862 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.781079 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-config-data\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.781106 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-run-httpd\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.781144 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.781165 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-config-data\") pod \"nova-api-0\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " pod="openstack/nova-api-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.781327 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-log-httpd\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.781842 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-run-httpd\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.786897 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-ceilometer-tls-certs\") pod 
\"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.787405 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.789193 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.789865 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-scripts\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.799228 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-config-data\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.802666 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqlcf\" (UniqueName: \"kubernetes.io/projected/14bf5e9a-1354-4b0d-a475-2d3de20a07fd-kube-api-access-rqlcf\") pod \"ceilometer-0\" (UID: \"14bf5e9a-1354-4b0d-a475-2d3de20a07fd\") " pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.882882 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-config-data\") pod \"nova-api-0\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " pod="openstack/nova-api-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.883255 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cee1023b-62ef-498d-9603-3a01d75f4971-logs\") pod \"nova-api-0\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " pod="openstack/nova-api-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.883401 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f5e992c-a5a9-437d-9f18-89684260190c-config-data\") pod \"nova-cell1-cell-mapping-bsrx2\" (UID: \"5f5e992c-a5a9-437d-9f18-89684260190c\") " pod="openstack/nova-cell1-cell-mapping-bsrx2" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.883559 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vzsw\" (UniqueName: \"kubernetes.io/projected/cee1023b-62ef-498d-9603-3a01d75f4971-kube-api-access-5vzsw\") pod \"nova-api-0\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " pod="openstack/nova-api-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.883732 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qltlw\" (UniqueName: 
\"kubernetes.io/projected/5f5e992c-a5a9-437d-9f18-89684260190c-kube-api-access-qltlw\") pod \"nova-cell1-cell-mapping-bsrx2\" (UID: \"5f5e992c-a5a9-437d-9f18-89684260190c\") " pod="openstack/nova-cell1-cell-mapping-bsrx2" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.883890 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " pod="openstack/nova-api-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.884068 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " pod="openstack/nova-api-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.884281 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cee1023b-62ef-498d-9603-3a01d75f4971-logs\") pod \"nova-api-0\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " pod="openstack/nova-api-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.884289 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-public-tls-certs\") pod \"nova-api-0\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " pod="openstack/nova-api-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.884478 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f5e992c-a5a9-437d-9f18-89684260190c-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-bsrx2\" (UID: \"5f5e992c-a5a9-437d-9f18-89684260190c\") " pod="openstack/nova-cell1-cell-mapping-bsrx2" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.884639 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5f5e992c-a5a9-437d-9f18-89684260190c-scripts\") pod \"nova-cell1-cell-mapping-bsrx2\" (UID: \"5f5e992c-a5a9-437d-9f18-89684260190c\") " pod="openstack/nova-cell1-cell-mapping-bsrx2" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.888713 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " pod="openstack/nova-api-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.889210 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " pod="openstack/nova-api-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.890559 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.893089 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-config-data\") pod \"nova-api-0\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " pod="openstack/nova-api-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.896059 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-public-tls-certs\") pod \"nova-api-0\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " pod="openstack/nova-api-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.903691 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vzsw\" (UniqueName: \"kubernetes.io/projected/cee1023b-62ef-498d-9603-3a01d75f4971-kube-api-access-5vzsw\") pod \"nova-api-0\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " pod="openstack/nova-api-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.940613 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.989517 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f5e992c-a5a9-437d-9f18-89684260190c-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-bsrx2\" (UID: \"5f5e992c-a5a9-437d-9f18-89684260190c\") " pod="openstack/nova-cell1-cell-mapping-bsrx2" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.989614 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5f5e992c-a5a9-437d-9f18-89684260190c-scripts\") pod \"nova-cell1-cell-mapping-bsrx2\" (UID: \"5f5e992c-a5a9-437d-9f18-89684260190c\") " pod="openstack/nova-cell1-cell-mapping-bsrx2" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.989842 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f5e992c-a5a9-437d-9f18-89684260190c-config-data\") pod \"nova-cell1-cell-mapping-bsrx2\" (UID: \"5f5e992c-a5a9-437d-9f18-89684260190c\") " pod="openstack/nova-cell1-cell-mapping-bsrx2" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.989899 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qltlw\" (UniqueName: \"kubernetes.io/projected/5f5e992c-a5a9-437d-9f18-89684260190c-kube-api-access-qltlw\") pod \"nova-cell1-cell-mapping-bsrx2\" (UID: \"5f5e992c-a5a9-437d-9f18-89684260190c\") " pod="openstack/nova-cell1-cell-mapping-bsrx2" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.996452 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f5e992c-a5a9-437d-9f18-89684260190c-config-data\") pod \"nova-cell1-cell-mapping-bsrx2\" (UID: \"5f5e992c-a5a9-437d-9f18-89684260190c\") " pod="openstack/nova-cell1-cell-mapping-bsrx2" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.998428 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f5e992c-a5a9-437d-9f18-89684260190c-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-bsrx2\" (UID: \"5f5e992c-a5a9-437d-9f18-89684260190c\") " 
pod="openstack/nova-cell1-cell-mapping-bsrx2" Nov 25 09:25:49 crc kubenswrapper[4687]: I1125 09:25:49.998740 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5f5e992c-a5a9-437d-9f18-89684260190c-scripts\") pod \"nova-cell1-cell-mapping-bsrx2\" (UID: \"5f5e992c-a5a9-437d-9f18-89684260190c\") " pod="openstack/nova-cell1-cell-mapping-bsrx2" Nov 25 09:25:50 crc kubenswrapper[4687]: I1125 09:25:50.018529 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qltlw\" (UniqueName: \"kubernetes.io/projected/5f5e992c-a5a9-437d-9f18-89684260190c-kube-api-access-qltlw\") pod \"nova-cell1-cell-mapping-bsrx2\" (UID: \"5f5e992c-a5a9-437d-9f18-89684260190c\") " pod="openstack/nova-cell1-cell-mapping-bsrx2" Nov 25 09:25:50 crc kubenswrapper[4687]: I1125 09:25:50.032778 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-bsrx2" Nov 25 09:25:50 crc kubenswrapper[4687]: I1125 09:25:50.410608 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:25:50 crc kubenswrapper[4687]: I1125 09:25:50.455067 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"14bf5e9a-1354-4b0d-a475-2d3de20a07fd","Type":"ContainerStarted","Data":"5c2ecebabb71149db79e65ac135503368490d4a70d9e5ff0a8cccd265863461f"} Nov 25 09:25:50 crc kubenswrapper[4687]: I1125 09:25:50.508375 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:25:50 crc kubenswrapper[4687]: I1125 09:25:50.600906 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-bsrx2"] Nov 25 09:25:51 crc kubenswrapper[4687]: I1125 09:25:51.467403 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"14bf5e9a-1354-4b0d-a475-2d3de20a07fd","Type":"ContainerStarted","Data":"649b64a19ba30885c5922c3a5e28050d22e9d0147eb6e5405ae71c351eddd1a8"} Nov 25 09:25:51 crc kubenswrapper[4687]: I1125 09:25:51.469607 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-bsrx2" event={"ID":"5f5e992c-a5a9-437d-9f18-89684260190c","Type":"ContainerStarted","Data":"68b8ca17adfd03dd0df30f5ddfc10ae586ef55f2e271a15face759ef764d9fa0"} Nov 25 09:25:51 crc kubenswrapper[4687]: I1125 09:25:51.469647 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-bsrx2" event={"ID":"5f5e992c-a5a9-437d-9f18-89684260190c","Type":"ContainerStarted","Data":"7cb2ccc3cc2ec34cb9dc62ba09570ce82c6ac1331ea7f6193bb064ca61477c30"} Nov 25 09:25:51 crc kubenswrapper[4687]: I1125 09:25:51.471708 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cee1023b-62ef-498d-9603-3a01d75f4971","Type":"ContainerStarted","Data":"9bded74d1426651699fcd9df290cda70fb502c9c6d9edfe529eeae226b91c76f"} Nov 25 09:25:51 crc kubenswrapper[4687]: I1125 09:25:51.471757 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cee1023b-62ef-498d-9603-3a01d75f4971","Type":"ContainerStarted","Data":"b9ad6434f335c71ebfe555c026a9adda3a181f70c819841ee5d1e78829468c95"} Nov 25 09:25:51 crc kubenswrapper[4687]: I1125 09:25:51.471771 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"cee1023b-62ef-498d-9603-3a01d75f4971","Type":"ContainerStarted","Data":"c99d445bd40d7eb9a9a3f5cdce6cbdd342a0cec2548ac36fbb6e4d522905970e"} Nov 25 09:25:51 crc kubenswrapper[4687]: I1125 09:25:51.493611 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-bsrx2" podStartSLOduration=2.493586666 podStartE2EDuration="2.493586666s" podCreationTimestamp="2025-11-25 09:25:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:25:51.483397358 +0000 UTC m=+1346.537037076" watchObservedRunningTime="2025-11-25 09:25:51.493586666 +0000 UTC m=+1346.547226384" Nov 25 09:25:51 crc kubenswrapper[4687]: I1125 09:25:51.519068 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.51904895 podStartE2EDuration="2.51904895s" podCreationTimestamp="2025-11-25 09:25:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:25:51.502925551 +0000 UTC m=+1346.556565269" watchObservedRunningTime="2025-11-25 09:25:51.51904895 +0000 UTC m=+1346.572688668" Nov 25 09:25:52 crc kubenswrapper[4687]: I1125 09:25:52.493765 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"14bf5e9a-1354-4b0d-a475-2d3de20a07fd","Type":"ContainerStarted","Data":"9b602b322fee559cf444f91b3a015add41b8c5e5c3f9aab364c0b0e22ed98a40"} Nov 25 09:25:52 crc kubenswrapper[4687]: I1125 09:25:52.821389 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:25:52 crc kubenswrapper[4687]: I1125 09:25:52.882182 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-67tr7"] Nov 25 09:25:52 crc kubenswrapper[4687]: I1125 09:25:52.882561 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-bccf8f775-67tr7" podUID="dbf632e8-4b43-4ff2-991b-94111453b58b" containerName="dnsmasq-dns" containerID="cri-o://e7b7434c73baa55375567695d5fd34ed47aa5a940b91285096fc2b8f59a7d22d" gracePeriod=10 Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.503912 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"14bf5e9a-1354-4b0d-a475-2d3de20a07fd","Type":"ContainerStarted","Data":"e52d495d1f60398ed1a31425a500518aa6d1697376febde6336c70176805ded3"} Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.505868 4687 generic.go:334] "Generic (PLEG): container finished" podID="dbf632e8-4b43-4ff2-991b-94111453b58b" containerID="e7b7434c73baa55375567695d5fd34ed47aa5a940b91285096fc2b8f59a7d22d" exitCode=0 Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.505896 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-67tr7" event={"ID":"dbf632e8-4b43-4ff2-991b-94111453b58b","Type":"ContainerDied","Data":"e7b7434c73baa55375567695d5fd34ed47aa5a940b91285096fc2b8f59a7d22d"} Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.605616 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.672997 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-ovsdbserver-sb\") pod \"dbf632e8-4b43-4ff2-991b-94111453b58b\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.673677 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-dns-swift-storage-0\") pod \"dbf632e8-4b43-4ff2-991b-94111453b58b\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.673867 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-config\") pod \"dbf632e8-4b43-4ff2-991b-94111453b58b\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.674080 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-dns-svc\") pod \"dbf632e8-4b43-4ff2-991b-94111453b58b\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.674191 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5m7z\" (UniqueName: \"kubernetes.io/projected/dbf632e8-4b43-4ff2-991b-94111453b58b-kube-api-access-g5m7z\") pod \"dbf632e8-4b43-4ff2-991b-94111453b58b\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.674375 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-ovsdbserver-nb\") pod \"dbf632e8-4b43-4ff2-991b-94111453b58b\" (UID: \"dbf632e8-4b43-4ff2-991b-94111453b58b\") " Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.685990 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbf632e8-4b43-4ff2-991b-94111453b58b-kube-api-access-g5m7z" (OuterVolumeSpecName: "kube-api-access-g5m7z") pod "dbf632e8-4b43-4ff2-991b-94111453b58b" (UID: "dbf632e8-4b43-4ff2-991b-94111453b58b"). InnerVolumeSpecName "kube-api-access-g5m7z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.754917 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-config" (OuterVolumeSpecName: "config") pod "dbf632e8-4b43-4ff2-991b-94111453b58b" (UID: "dbf632e8-4b43-4ff2-991b-94111453b58b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.756987 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "dbf632e8-4b43-4ff2-991b-94111453b58b" (UID: "dbf632e8-4b43-4ff2-991b-94111453b58b"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.765223 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "dbf632e8-4b43-4ff2-991b-94111453b58b" (UID: "dbf632e8-4b43-4ff2-991b-94111453b58b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.777052 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.777091 4687 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.777103 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.777114 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5m7z\" (UniqueName: \"kubernetes.io/projected/dbf632e8-4b43-4ff2-991b-94111453b58b-kube-api-access-g5m7z\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.781772 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "dbf632e8-4b43-4ff2-991b-94111453b58b" (UID: "dbf632e8-4b43-4ff2-991b-94111453b58b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.782171 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "dbf632e8-4b43-4ff2-991b-94111453b58b" (UID: "dbf632e8-4b43-4ff2-991b-94111453b58b"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.878577 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:53 crc kubenswrapper[4687]: I1125 09:25:53.878607 4687 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dbf632e8-4b43-4ff2-991b-94111453b58b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:54 crc kubenswrapper[4687]: I1125 09:25:54.516597 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-67tr7" event={"ID":"dbf632e8-4b43-4ff2-991b-94111453b58b","Type":"ContainerDied","Data":"53831e7b61e4ef5681e7cd2e5f67929ad3a6b7e399596a6a806dab549aa15a5f"} Nov 25 09:25:54 crc kubenswrapper[4687]: I1125 09:25:54.516943 4687 scope.go:117] "RemoveContainer" containerID="e7b7434c73baa55375567695d5fd34ed47aa5a940b91285096fc2b8f59a7d22d" Nov 25 09:25:54 crc kubenswrapper[4687]: I1125 09:25:54.516623 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-67tr7" Nov 25 09:25:54 crc kubenswrapper[4687]: I1125 09:25:54.522283 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"14bf5e9a-1354-4b0d-a475-2d3de20a07fd","Type":"ContainerStarted","Data":"3c2f463ec9880979dbdfccc3a16e43a156c20f7e8e9095ef672f1ff3012fec4b"} Nov 25 09:25:54 crc kubenswrapper[4687]: I1125 09:25:54.522490 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 09:25:54 crc kubenswrapper[4687]: I1125 09:25:54.554120 4687 scope.go:117] "RemoveContainer" containerID="8105cb68e02ead8876e028d055fe33bca39e940ab7c9f1bb316ffa57ed435646" Nov 25 09:25:54 crc kubenswrapper[4687]: I1125 09:25:54.574367 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.364502295 podStartE2EDuration="5.574344168s" podCreationTimestamp="2025-11-25 09:25:49 +0000 UTC" firstStartedPulling="2025-11-25 09:25:50.419631989 +0000 UTC m=+1345.473271707" lastFinishedPulling="2025-11-25 09:25:53.629473862 +0000 UTC m=+1348.683113580" observedRunningTime="2025-11-25 09:25:54.551721441 +0000 UTC m=+1349.605361179" watchObservedRunningTime="2025-11-25 09:25:54.574344168 +0000 UTC m=+1349.627983886" Nov 25 09:25:54 crc kubenswrapper[4687]: I1125 09:25:54.589601 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-67tr7"] Nov 25 09:25:54 crc kubenswrapper[4687]: I1125 09:25:54.601944 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-67tr7"] Nov 25 09:25:55 crc kubenswrapper[4687]: I1125 09:25:55.747119 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbf632e8-4b43-4ff2-991b-94111453b58b" path="/var/lib/kubelet/pods/dbf632e8-4b43-4ff2-991b-94111453b58b/volumes" Nov 25 09:25:56 crc kubenswrapper[4687]: I1125 09:25:56.566309 4687 generic.go:334] "Generic (PLEG): container finished" podID="5f5e992c-a5a9-437d-9f18-89684260190c" containerID="68b8ca17adfd03dd0df30f5ddfc10ae586ef55f2e271a15face759ef764d9fa0" exitCode=0 Nov 25 09:25:56 crc kubenswrapper[4687]: I1125 09:25:56.566653 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-bsrx2" 
event={"ID":"5f5e992c-a5a9-437d-9f18-89684260190c","Type":"ContainerDied","Data":"68b8ca17adfd03dd0df30f5ddfc10ae586ef55f2e271a15face759ef764d9fa0"} Nov 25 09:25:57 crc kubenswrapper[4687]: I1125 09:25:57.972839 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-bsrx2" Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.167256 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f5e992c-a5a9-437d-9f18-89684260190c-config-data\") pod \"5f5e992c-a5a9-437d-9f18-89684260190c\" (UID: \"5f5e992c-a5a9-437d-9f18-89684260190c\") " Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.167328 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qltlw\" (UniqueName: \"kubernetes.io/projected/5f5e992c-a5a9-437d-9f18-89684260190c-kube-api-access-qltlw\") pod \"5f5e992c-a5a9-437d-9f18-89684260190c\" (UID: \"5f5e992c-a5a9-437d-9f18-89684260190c\") " Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.167361 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f5e992c-a5a9-437d-9f18-89684260190c-combined-ca-bundle\") pod \"5f5e992c-a5a9-437d-9f18-89684260190c\" (UID: \"5f5e992c-a5a9-437d-9f18-89684260190c\") " Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.167382 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5f5e992c-a5a9-437d-9f18-89684260190c-scripts\") pod \"5f5e992c-a5a9-437d-9f18-89684260190c\" (UID: \"5f5e992c-a5a9-437d-9f18-89684260190c\") " Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.173326 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f5e992c-a5a9-437d-9f18-89684260190c-scripts" (OuterVolumeSpecName: "scripts") pod "5f5e992c-a5a9-437d-9f18-89684260190c" (UID: "5f5e992c-a5a9-437d-9f18-89684260190c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.173390 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f5e992c-a5a9-437d-9f18-89684260190c-kube-api-access-qltlw" (OuterVolumeSpecName: "kube-api-access-qltlw") pod "5f5e992c-a5a9-437d-9f18-89684260190c" (UID: "5f5e992c-a5a9-437d-9f18-89684260190c"). InnerVolumeSpecName "kube-api-access-qltlw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.194628 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f5e992c-a5a9-437d-9f18-89684260190c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5f5e992c-a5a9-437d-9f18-89684260190c" (UID: "5f5e992c-a5a9-437d-9f18-89684260190c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.209850 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f5e992c-a5a9-437d-9f18-89684260190c-config-data" (OuterVolumeSpecName: "config-data") pod "5f5e992c-a5a9-437d-9f18-89684260190c" (UID: "5f5e992c-a5a9-437d-9f18-89684260190c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.268994 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f5e992c-a5a9-437d-9f18-89684260190c-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.269044 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qltlw\" (UniqueName: \"kubernetes.io/projected/5f5e992c-a5a9-437d-9f18-89684260190c-kube-api-access-qltlw\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.269055 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f5e992c-a5a9-437d-9f18-89684260190c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.269063 4687 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5f5e992c-a5a9-437d-9f18-89684260190c-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.527870 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-bccf8f775-67tr7" podUID="dbf632e8-4b43-4ff2-991b-94111453b58b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.185:5353: i/o timeout" Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.593550 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-bsrx2" event={"ID":"5f5e992c-a5a9-437d-9f18-89684260190c","Type":"ContainerDied","Data":"7cb2ccc3cc2ec34cb9dc62ba09570ce82c6ac1331ea7f6193bb064ca61477c30"} Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.593589 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7cb2ccc3cc2ec34cb9dc62ba09570ce82c6ac1331ea7f6193bb064ca61477c30" Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.593640 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-bsrx2" Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.768872 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.769216 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="cee1023b-62ef-498d-9603-3a01d75f4971" containerName="nova-api-log" containerID="cri-o://b9ad6434f335c71ebfe555c026a9adda3a181f70c819841ee5d1e78829468c95" gracePeriod=30 Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.769282 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="cee1023b-62ef-498d-9603-3a01d75f4971" containerName="nova-api-api" containerID="cri-o://9bded74d1426651699fcd9df290cda70fb502c9c6d9edfe529eeae226b91c76f" gracePeriod=30 Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.789046 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.789310 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3" containerName="nova-scheduler-scheduler" containerID="cri-o://fbe135abd5cf9331b1a53c3caa340ac3e532d54cb11a8cc877bb2bf8d00fd271" gracePeriod=30 Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.851104 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.851564 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="663335a7-77d3-4114-b2c4-7deb73b7e11d" containerName="nova-metadata-log" containerID="cri-o://1331baeb55abad14196e470d7b417e6be7d817595861f55e802c6414ea65d7b9" gracePeriod=30 Nov 25 09:25:58 crc kubenswrapper[4687]: I1125 09:25:58.851740 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="663335a7-77d3-4114-b2c4-7deb73b7e11d" containerName="nova-metadata-metadata" containerID="cri-o://62444ed88a14d68e2d69dc116fddc906b095f8d99bbbe82c863affa9c5a51108" gracePeriod=30 Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.376395 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.494187 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-public-tls-certs\") pod \"cee1023b-62ef-498d-9603-3a01d75f4971\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.494329 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-internal-tls-certs\") pod \"cee1023b-62ef-498d-9603-3a01d75f4971\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.494424 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cee1023b-62ef-498d-9603-3a01d75f4971-logs\") pod \"cee1023b-62ef-498d-9603-3a01d75f4971\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.494560 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-combined-ca-bundle\") pod \"cee1023b-62ef-498d-9603-3a01d75f4971\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.494719 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5vzsw\" (UniqueName: \"kubernetes.io/projected/cee1023b-62ef-498d-9603-3a01d75f4971-kube-api-access-5vzsw\") pod \"cee1023b-62ef-498d-9603-3a01d75f4971\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.494769 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-config-data\") pod \"cee1023b-62ef-498d-9603-3a01d75f4971\" (UID: \"cee1023b-62ef-498d-9603-3a01d75f4971\") " Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.506441 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cee1023b-62ef-498d-9603-3a01d75f4971-logs" (OuterVolumeSpecName: "logs") pod "cee1023b-62ef-498d-9603-3a01d75f4971" (UID: "cee1023b-62ef-498d-9603-3a01d75f4971"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.532553 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cee1023b-62ef-498d-9603-3a01d75f4971-kube-api-access-5vzsw" (OuterVolumeSpecName: "kube-api-access-5vzsw") pod "cee1023b-62ef-498d-9603-3a01d75f4971" (UID: "cee1023b-62ef-498d-9603-3a01d75f4971"). InnerVolumeSpecName "kube-api-access-5vzsw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.545405 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-config-data" (OuterVolumeSpecName: "config-data") pod "cee1023b-62ef-498d-9603-3a01d75f4971" (UID: "cee1023b-62ef-498d-9603-3a01d75f4971"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.558803 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cee1023b-62ef-498d-9603-3a01d75f4971" (UID: "cee1023b-62ef-498d-9603-3a01d75f4971"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.578296 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "cee1023b-62ef-498d-9603-3a01d75f4971" (UID: "cee1023b-62ef-498d-9603-3a01d75f4971"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.596493 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5vzsw\" (UniqueName: \"kubernetes.io/projected/cee1023b-62ef-498d-9603-3a01d75f4971-kube-api-access-5vzsw\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.596557 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.596570 4687 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.596581 4687 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cee1023b-62ef-498d-9603-3a01d75f4971-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.596592 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.610643 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "cee1023b-62ef-498d-9603-3a01d75f4971" (UID: "cee1023b-62ef-498d-9603-3a01d75f4971"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.611606 4687 generic.go:334] "Generic (PLEG): container finished" podID="663335a7-77d3-4114-b2c4-7deb73b7e11d" containerID="1331baeb55abad14196e470d7b417e6be7d817595861f55e802c6414ea65d7b9" exitCode=143 Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.611674 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"663335a7-77d3-4114-b2c4-7deb73b7e11d","Type":"ContainerDied","Data":"1331baeb55abad14196e470d7b417e6be7d817595861f55e802c6414ea65d7b9"} Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.614984 4687 generic.go:334] "Generic (PLEG): container finished" podID="cee1023b-62ef-498d-9603-3a01d75f4971" containerID="9bded74d1426651699fcd9df290cda70fb502c9c6d9edfe529eeae226b91c76f" exitCode=0 Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.615009 4687 generic.go:334] "Generic (PLEG): container finished" podID="cee1023b-62ef-498d-9603-3a01d75f4971" containerID="b9ad6434f335c71ebfe555c026a9adda3a181f70c819841ee5d1e78829468c95" exitCode=143 Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.615028 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cee1023b-62ef-498d-9603-3a01d75f4971","Type":"ContainerDied","Data":"9bded74d1426651699fcd9df290cda70fb502c9c6d9edfe529eeae226b91c76f"} Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.615051 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cee1023b-62ef-498d-9603-3a01d75f4971","Type":"ContainerDied","Data":"b9ad6434f335c71ebfe555c026a9adda3a181f70c819841ee5d1e78829468c95"} Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.615063 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cee1023b-62ef-498d-9603-3a01d75f4971","Type":"ContainerDied","Data":"c99d445bd40d7eb9a9a3f5cdce6cbdd342a0cec2548ac36fbb6e4d522905970e"} Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.615077 4687 scope.go:117] "RemoveContainer" containerID="9bded74d1426651699fcd9df290cda70fb502c9c6d9edfe529eeae226b91c76f" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.615240 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.664919 4687 scope.go:117] "RemoveContainer" containerID="b9ad6434f335c71ebfe555c026a9adda3a181f70c819841ee5d1e78829468c95" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.686561 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.697910 4687 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cee1023b-62ef-498d-9603-3a01d75f4971-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.708662 4687 scope.go:117] "RemoveContainer" containerID="9bded74d1426651699fcd9df290cda70fb502c9c6d9edfe529eeae226b91c76f" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.708792 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:25:59 crc kubenswrapper[4687]: E1125 09:25:59.709637 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9bded74d1426651699fcd9df290cda70fb502c9c6d9edfe529eeae226b91c76f\": container with ID starting with 9bded74d1426651699fcd9df290cda70fb502c9c6d9edfe529eeae226b91c76f not found: ID does not exist" containerID="9bded74d1426651699fcd9df290cda70fb502c9c6d9edfe529eeae226b91c76f" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.709670 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9bded74d1426651699fcd9df290cda70fb502c9c6d9edfe529eeae226b91c76f"} err="failed to get container status \"9bded74d1426651699fcd9df290cda70fb502c9c6d9edfe529eeae226b91c76f\": rpc error: code = NotFound desc = could not find container \"9bded74d1426651699fcd9df290cda70fb502c9c6d9edfe529eeae226b91c76f\": container with ID starting with 9bded74d1426651699fcd9df290cda70fb502c9c6d9edfe529eeae226b91c76f not found: ID does not exist" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.709691 4687 scope.go:117] "RemoveContainer" containerID="b9ad6434f335c71ebfe555c026a9adda3a181f70c819841ee5d1e78829468c95" Nov 25 09:25:59 crc kubenswrapper[4687]: E1125 09:25:59.710181 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b9ad6434f335c71ebfe555c026a9adda3a181f70c819841ee5d1e78829468c95\": container with ID starting with b9ad6434f335c71ebfe555c026a9adda3a181f70c819841ee5d1e78829468c95 not found: ID does not exist" containerID="b9ad6434f335c71ebfe555c026a9adda3a181f70c819841ee5d1e78829468c95" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.710204 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b9ad6434f335c71ebfe555c026a9adda3a181f70c819841ee5d1e78829468c95"} err="failed to get container status \"b9ad6434f335c71ebfe555c026a9adda3a181f70c819841ee5d1e78829468c95\": rpc error: code = NotFound desc = could not find container \"b9ad6434f335c71ebfe555c026a9adda3a181f70c819841ee5d1e78829468c95\": container with ID starting with b9ad6434f335c71ebfe555c026a9adda3a181f70c819841ee5d1e78829468c95 not found: ID does not exist" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.710219 4687 scope.go:117] "RemoveContainer" containerID="9bded74d1426651699fcd9df290cda70fb502c9c6d9edfe529eeae226b91c76f" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.710461 4687 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9bded74d1426651699fcd9df290cda70fb502c9c6d9edfe529eeae226b91c76f"} err="failed to get container status \"9bded74d1426651699fcd9df290cda70fb502c9c6d9edfe529eeae226b91c76f\": rpc error: code = NotFound desc = could not find container \"9bded74d1426651699fcd9df290cda70fb502c9c6d9edfe529eeae226b91c76f\": container with ID starting with 9bded74d1426651699fcd9df290cda70fb502c9c6d9edfe529eeae226b91c76f not found: ID does not exist" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.710479 4687 scope.go:117] "RemoveContainer" containerID="b9ad6434f335c71ebfe555c026a9adda3a181f70c819841ee5d1e78829468c95" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.710814 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b9ad6434f335c71ebfe555c026a9adda3a181f70c819841ee5d1e78829468c95"} err="failed to get container status \"b9ad6434f335c71ebfe555c026a9adda3a181f70c819841ee5d1e78829468c95\": rpc error: code = NotFound desc = could not find container \"b9ad6434f335c71ebfe555c026a9adda3a181f70c819841ee5d1e78829468c95\": container with ID starting with b9ad6434f335c71ebfe555c026a9adda3a181f70c819841ee5d1e78829468c95 not found: ID does not exist" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.721828 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 09:25:59 crc kubenswrapper[4687]: E1125 09:25:59.722256 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbf632e8-4b43-4ff2-991b-94111453b58b" containerName="init" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.722279 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbf632e8-4b43-4ff2-991b-94111453b58b" containerName="init" Nov 25 09:25:59 crc kubenswrapper[4687]: E1125 09:25:59.722294 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbf632e8-4b43-4ff2-991b-94111453b58b" containerName="dnsmasq-dns" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.722302 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbf632e8-4b43-4ff2-991b-94111453b58b" containerName="dnsmasq-dns" Nov 25 09:25:59 crc kubenswrapper[4687]: E1125 09:25:59.722314 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cee1023b-62ef-498d-9603-3a01d75f4971" containerName="nova-api-api" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.722322 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="cee1023b-62ef-498d-9603-3a01d75f4971" containerName="nova-api-api" Nov 25 09:25:59 crc kubenswrapper[4687]: E1125 09:25:59.722345 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f5e992c-a5a9-437d-9f18-89684260190c" containerName="nova-manage" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.722353 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f5e992c-a5a9-437d-9f18-89684260190c" containerName="nova-manage" Nov 25 09:25:59 crc kubenswrapper[4687]: E1125 09:25:59.722395 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cee1023b-62ef-498d-9603-3a01d75f4971" containerName="nova-api-log" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.722405 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="cee1023b-62ef-498d-9603-3a01d75f4971" containerName="nova-api-log" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.722824 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbf632e8-4b43-4ff2-991b-94111453b58b" containerName="dnsmasq-dns" Nov 25 
09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.722848 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="cee1023b-62ef-498d-9603-3a01d75f4971" containerName="nova-api-log" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.722872 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f5e992c-a5a9-437d-9f18-89684260190c" containerName="nova-manage" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.722882 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="cee1023b-62ef-498d-9603-3a01d75f4971" containerName="nova-api-api" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.725218 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.734450 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.735389 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.735731 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.751855 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cee1023b-62ef-498d-9603-3a01d75f4971" path="/var/lib/kubelet/pods/cee1023b-62ef-498d-9603-3a01d75f4971/volumes" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.752967 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.902854 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzplg\" (UniqueName: \"kubernetes.io/projected/8ef7fc72-708b-4994-9ced-44ec353121fc-kube-api-access-mzplg\") pod \"nova-api-0\" (UID: \"8ef7fc72-708b-4994-9ced-44ec353121fc\") " pod="openstack/nova-api-0" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.902951 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ef7fc72-708b-4994-9ced-44ec353121fc-internal-tls-certs\") pod \"nova-api-0\" (UID: \"8ef7fc72-708b-4994-9ced-44ec353121fc\") " pod="openstack/nova-api-0" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.903166 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ef7fc72-708b-4994-9ced-44ec353121fc-public-tls-certs\") pod \"nova-api-0\" (UID: \"8ef7fc72-708b-4994-9ced-44ec353121fc\") " pod="openstack/nova-api-0" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.903236 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ef7fc72-708b-4994-9ced-44ec353121fc-logs\") pod \"nova-api-0\" (UID: \"8ef7fc72-708b-4994-9ced-44ec353121fc\") " pod="openstack/nova-api-0" Nov 25 09:25:59 crc kubenswrapper[4687]: I1125 09:25:59.903310 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ef7fc72-708b-4994-9ced-44ec353121fc-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8ef7fc72-708b-4994-9ced-44ec353121fc\") " pod="openstack/nova-api-0" Nov 25 09:25:59 crc 
kubenswrapper[4687]: I1125 09:25:59.903331 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ef7fc72-708b-4994-9ced-44ec353121fc-config-data\") pod \"nova-api-0\" (UID: \"8ef7fc72-708b-4994-9ced-44ec353121fc\") " pod="openstack/nova-api-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.007706 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ef7fc72-708b-4994-9ced-44ec353121fc-public-tls-certs\") pod \"nova-api-0\" (UID: \"8ef7fc72-708b-4994-9ced-44ec353121fc\") " pod="openstack/nova-api-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.007775 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ef7fc72-708b-4994-9ced-44ec353121fc-logs\") pod \"nova-api-0\" (UID: \"8ef7fc72-708b-4994-9ced-44ec353121fc\") " pod="openstack/nova-api-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.007826 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ef7fc72-708b-4994-9ced-44ec353121fc-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8ef7fc72-708b-4994-9ced-44ec353121fc\") " pod="openstack/nova-api-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.007850 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ef7fc72-708b-4994-9ced-44ec353121fc-config-data\") pod \"nova-api-0\" (UID: \"8ef7fc72-708b-4994-9ced-44ec353121fc\") " pod="openstack/nova-api-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.007950 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzplg\" (UniqueName: \"kubernetes.io/projected/8ef7fc72-708b-4994-9ced-44ec353121fc-kube-api-access-mzplg\") pod \"nova-api-0\" (UID: \"8ef7fc72-708b-4994-9ced-44ec353121fc\") " pod="openstack/nova-api-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.007995 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ef7fc72-708b-4994-9ced-44ec353121fc-internal-tls-certs\") pod \"nova-api-0\" (UID: \"8ef7fc72-708b-4994-9ced-44ec353121fc\") " pod="openstack/nova-api-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.008861 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ef7fc72-708b-4994-9ced-44ec353121fc-logs\") pod \"nova-api-0\" (UID: \"8ef7fc72-708b-4994-9ced-44ec353121fc\") " pod="openstack/nova-api-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.015074 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ef7fc72-708b-4994-9ced-44ec353121fc-config-data\") pod \"nova-api-0\" (UID: \"8ef7fc72-708b-4994-9ced-44ec353121fc\") " pod="openstack/nova-api-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.015415 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ef7fc72-708b-4994-9ced-44ec353121fc-internal-tls-certs\") pod \"nova-api-0\" (UID: \"8ef7fc72-708b-4994-9ced-44ec353121fc\") " pod="openstack/nova-api-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.016318 4687 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ef7fc72-708b-4994-9ced-44ec353121fc-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8ef7fc72-708b-4994-9ced-44ec353121fc\") " pod="openstack/nova-api-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.027307 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzplg\" (UniqueName: \"kubernetes.io/projected/8ef7fc72-708b-4994-9ced-44ec353121fc-kube-api-access-mzplg\") pod \"nova-api-0\" (UID: \"8ef7fc72-708b-4994-9ced-44ec353121fc\") " pod="openstack/nova-api-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.028309 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ef7fc72-708b-4994-9ced-44ec353121fc-public-tls-certs\") pod \"nova-api-0\" (UID: \"8ef7fc72-708b-4994-9ced-44ec353121fc\") " pod="openstack/nova-api-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.123126 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.145014 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.210174 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3-config-data\") pod \"c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3\" (UID: \"c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3\") " Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.210250 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8bw6x\" (UniqueName: \"kubernetes.io/projected/c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3-kube-api-access-8bw6x\") pod \"c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3\" (UID: \"c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3\") " Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.210418 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3-combined-ca-bundle\") pod \"c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3\" (UID: \"c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3\") " Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.221869 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3-kube-api-access-8bw6x" (OuterVolumeSpecName: "kube-api-access-8bw6x") pod "c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3" (UID: "c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3"). InnerVolumeSpecName "kube-api-access-8bw6x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.251661 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3" (UID: "c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.251728 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3-config-data" (OuterVolumeSpecName: "config-data") pod "c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3" (UID: "c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.313030 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.313070 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bw6x\" (UniqueName: \"kubernetes.io/projected/c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3-kube-api-access-8bw6x\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.313084 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.629425 4687 generic.go:334] "Generic (PLEG): container finished" podID="c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3" containerID="fbe135abd5cf9331b1a53c3caa340ac3e532d54cb11a8cc877bb2bf8d00fd271" exitCode=0 Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.629478 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3","Type":"ContainerDied","Data":"fbe135abd5cf9331b1a53c3caa340ac3e532d54cb11a8cc877bb2bf8d00fd271"} Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.629528 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3","Type":"ContainerDied","Data":"ace1d20e70f3897aee39ca79a8efbcedb961a1bb4886990fa7a27ee8dce4a925"} Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.629552 4687 scope.go:117] "RemoveContainer" containerID="fbe135abd5cf9331b1a53c3caa340ac3e532d54cb11a8cc877bb2bf8d00fd271" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.629660 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.658857 4687 scope.go:117] "RemoveContainer" containerID="fbe135abd5cf9331b1a53c3caa340ac3e532d54cb11a8cc877bb2bf8d00fd271" Nov 25 09:26:00 crc kubenswrapper[4687]: E1125 09:26:00.659324 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fbe135abd5cf9331b1a53c3caa340ac3e532d54cb11a8cc877bb2bf8d00fd271\": container with ID starting with fbe135abd5cf9331b1a53c3caa340ac3e532d54cb11a8cc877bb2bf8d00fd271 not found: ID does not exist" containerID="fbe135abd5cf9331b1a53c3caa340ac3e532d54cb11a8cc877bb2bf8d00fd271" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.659366 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fbe135abd5cf9331b1a53c3caa340ac3e532d54cb11a8cc877bb2bf8d00fd271"} err="failed to get container status \"fbe135abd5cf9331b1a53c3caa340ac3e532d54cb11a8cc877bb2bf8d00fd271\": rpc error: code = NotFound desc = could not find container \"fbe135abd5cf9331b1a53c3caa340ac3e532d54cb11a8cc877bb2bf8d00fd271\": container with ID starting with fbe135abd5cf9331b1a53c3caa340ac3e532d54cb11a8cc877bb2bf8d00fd271 not found: ID does not exist" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.670155 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.682587 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.698747 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:26:00 crc kubenswrapper[4687]: E1125 09:26:00.699258 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3" containerName="nova-scheduler-scheduler" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.699301 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3" containerName="nova-scheduler-scheduler" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.699493 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3" containerName="nova-scheduler-scheduler" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.700281 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.702995 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.709450 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.745097 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:26:00 crc kubenswrapper[4687]: W1125 09:26:00.755665 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8ef7fc72_708b_4994_9ced_44ec353121fc.slice/crio-345674e135606b0060dffcc0f7de441de5d2bcdf76b41c56ffbcb9698b5338e2 WatchSource:0}: Error finding container 345674e135606b0060dffcc0f7de441de5d2bcdf76b41c56ffbcb9698b5338e2: Status 404 returned error can't find the container with id 345674e135606b0060dffcc0f7de441de5d2bcdf76b41c56ffbcb9698b5338e2 Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.822847 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qr9m4\" (UniqueName: \"kubernetes.io/projected/9e4aadac-caf5-4702-98c7-648843339aa5-kube-api-access-qr9m4\") pod \"nova-scheduler-0\" (UID: \"9e4aadac-caf5-4702-98c7-648843339aa5\") " pod="openstack/nova-scheduler-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.822900 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e4aadac-caf5-4702-98c7-648843339aa5-config-data\") pod \"nova-scheduler-0\" (UID: \"9e4aadac-caf5-4702-98c7-648843339aa5\") " pod="openstack/nova-scheduler-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.823179 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e4aadac-caf5-4702-98c7-648843339aa5-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9e4aadac-caf5-4702-98c7-648843339aa5\") " pod="openstack/nova-scheduler-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.924850 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e4aadac-caf5-4702-98c7-648843339aa5-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9e4aadac-caf5-4702-98c7-648843339aa5\") " pod="openstack/nova-scheduler-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.924980 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qr9m4\" (UniqueName: \"kubernetes.io/projected/9e4aadac-caf5-4702-98c7-648843339aa5-kube-api-access-qr9m4\") pod \"nova-scheduler-0\" (UID: \"9e4aadac-caf5-4702-98c7-648843339aa5\") " pod="openstack/nova-scheduler-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.925016 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e4aadac-caf5-4702-98c7-648843339aa5-config-data\") pod \"nova-scheduler-0\" (UID: \"9e4aadac-caf5-4702-98c7-648843339aa5\") " pod="openstack/nova-scheduler-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.928410 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/9e4aadac-caf5-4702-98c7-648843339aa5-config-data\") pod \"nova-scheduler-0\" (UID: \"9e4aadac-caf5-4702-98c7-648843339aa5\") " pod="openstack/nova-scheduler-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.929171 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e4aadac-caf5-4702-98c7-648843339aa5-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9e4aadac-caf5-4702-98c7-648843339aa5\") " pod="openstack/nova-scheduler-0" Nov 25 09:26:00 crc kubenswrapper[4687]: I1125 09:26:00.950792 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qr9m4\" (UniqueName: \"kubernetes.io/projected/9e4aadac-caf5-4702-98c7-648843339aa5-kube-api-access-qr9m4\") pod \"nova-scheduler-0\" (UID: \"9e4aadac-caf5-4702-98c7-648843339aa5\") " pod="openstack/nova-scheduler-0" Nov 25 09:26:01 crc kubenswrapper[4687]: I1125 09:26:01.020270 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 09:26:01 crc kubenswrapper[4687]: I1125 09:26:01.463879 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:26:01 crc kubenswrapper[4687]: I1125 09:26:01.642811 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8ef7fc72-708b-4994-9ced-44ec353121fc","Type":"ContainerStarted","Data":"cdbc1ccfbff5d4fb82c37b27c51efd339ce507e76105e992a3064113485e2f73"} Nov 25 09:26:01 crc kubenswrapper[4687]: I1125 09:26:01.642866 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8ef7fc72-708b-4994-9ced-44ec353121fc","Type":"ContainerStarted","Data":"a27b50b8a6d6ff514ed2c87212c00b1e2ea2296b8832075980741852a73d3aa0"} Nov 25 09:26:01 crc kubenswrapper[4687]: I1125 09:26:01.642880 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8ef7fc72-708b-4994-9ced-44ec353121fc","Type":"ContainerStarted","Data":"345674e135606b0060dffcc0f7de441de5d2bcdf76b41c56ffbcb9698b5338e2"} Nov 25 09:26:01 crc kubenswrapper[4687]: I1125 09:26:01.644184 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9e4aadac-caf5-4702-98c7-648843339aa5","Type":"ContainerStarted","Data":"a557b994410e93f7c9b8ac27100956426b3ec77ef072f7dd77addbbf52e3bc75"} Nov 25 09:26:01 crc kubenswrapper[4687]: I1125 09:26:01.662267 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.662250792 podStartE2EDuration="2.662250792s" podCreationTimestamp="2025-11-25 09:25:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:26:01.659667963 +0000 UTC m=+1356.713307681" watchObservedRunningTime="2025-11-25 09:26:01.662250792 +0000 UTC m=+1356.715890510" Nov 25 09:26:01 crc kubenswrapper[4687]: I1125 09:26:01.762600 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3" path="/var/lib/kubelet/pods/c5b1d2e6-ef83-4649-b54e-23e7ac8e35d3/volumes" Nov 25 09:26:01 crc kubenswrapper[4687]: I1125 09:26:01.986887 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="663335a7-77d3-4114-b2c4-7deb73b7e11d" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.188:8775/\": read tcp 
10.217.0.2:47508->10.217.0.188:8775: read: connection reset by peer" Nov 25 09:26:01 crc kubenswrapper[4687]: I1125 09:26:01.986919 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="663335a7-77d3-4114-b2c4-7deb73b7e11d" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.188:8775/\": read tcp 10.217.0.2:47520->10.217.0.188:8775: read: connection reset by peer" Nov 25 09:26:02 crc kubenswrapper[4687]: I1125 09:26:02.656450 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9e4aadac-caf5-4702-98c7-648843339aa5","Type":"ContainerStarted","Data":"e5bef073d42742dc1b3e8ac4ea41a22ec75446136dda4362087166f8164bfdd0"} Nov 25 09:26:02 crc kubenswrapper[4687]: I1125 09:26:02.661874 4687 generic.go:334] "Generic (PLEG): container finished" podID="663335a7-77d3-4114-b2c4-7deb73b7e11d" containerID="62444ed88a14d68e2d69dc116fddc906b095f8d99bbbe82c863affa9c5a51108" exitCode=0 Nov 25 09:26:02 crc kubenswrapper[4687]: I1125 09:26:02.662008 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"663335a7-77d3-4114-b2c4-7deb73b7e11d","Type":"ContainerDied","Data":"62444ed88a14d68e2d69dc116fddc906b095f8d99bbbe82c863affa9c5a51108"} Nov 25 09:26:02 crc kubenswrapper[4687]: I1125 09:26:02.679388 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.679371679 podStartE2EDuration="2.679371679s" podCreationTimestamp="2025-11-25 09:26:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:26:02.675087473 +0000 UTC m=+1357.728727211" watchObservedRunningTime="2025-11-25 09:26:02.679371679 +0000 UTC m=+1357.733011397" Nov 25 09:26:02 crc kubenswrapper[4687]: I1125 09:26:02.972326 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.072165 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/663335a7-77d3-4114-b2c4-7deb73b7e11d-logs\") pod \"663335a7-77d3-4114-b2c4-7deb73b7e11d\" (UID: \"663335a7-77d3-4114-b2c4-7deb73b7e11d\") " Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.072677 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vktm9\" (UniqueName: \"kubernetes.io/projected/663335a7-77d3-4114-b2c4-7deb73b7e11d-kube-api-access-vktm9\") pod \"663335a7-77d3-4114-b2c4-7deb73b7e11d\" (UID: \"663335a7-77d3-4114-b2c4-7deb73b7e11d\") " Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.072737 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/663335a7-77d3-4114-b2c4-7deb73b7e11d-combined-ca-bundle\") pod \"663335a7-77d3-4114-b2c4-7deb73b7e11d\" (UID: \"663335a7-77d3-4114-b2c4-7deb73b7e11d\") " Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.072806 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/663335a7-77d3-4114-b2c4-7deb73b7e11d-config-data\") pod \"663335a7-77d3-4114-b2c4-7deb73b7e11d\" (UID: \"663335a7-77d3-4114-b2c4-7deb73b7e11d\") " Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.072879 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/663335a7-77d3-4114-b2c4-7deb73b7e11d-nova-metadata-tls-certs\") pod \"663335a7-77d3-4114-b2c4-7deb73b7e11d\" (UID: \"663335a7-77d3-4114-b2c4-7deb73b7e11d\") " Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.073333 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/663335a7-77d3-4114-b2c4-7deb73b7e11d-logs" (OuterVolumeSpecName: "logs") pod "663335a7-77d3-4114-b2c4-7deb73b7e11d" (UID: "663335a7-77d3-4114-b2c4-7deb73b7e11d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.073548 4687 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/663335a7-77d3-4114-b2c4-7deb73b7e11d-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.094828 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/663335a7-77d3-4114-b2c4-7deb73b7e11d-kube-api-access-vktm9" (OuterVolumeSpecName: "kube-api-access-vktm9") pod "663335a7-77d3-4114-b2c4-7deb73b7e11d" (UID: "663335a7-77d3-4114-b2c4-7deb73b7e11d"). InnerVolumeSpecName "kube-api-access-vktm9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.103065 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/663335a7-77d3-4114-b2c4-7deb73b7e11d-config-data" (OuterVolumeSpecName: "config-data") pod "663335a7-77d3-4114-b2c4-7deb73b7e11d" (UID: "663335a7-77d3-4114-b2c4-7deb73b7e11d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.119754 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/663335a7-77d3-4114-b2c4-7deb73b7e11d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "663335a7-77d3-4114-b2c4-7deb73b7e11d" (UID: "663335a7-77d3-4114-b2c4-7deb73b7e11d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.129881 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/663335a7-77d3-4114-b2c4-7deb73b7e11d-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "663335a7-77d3-4114-b2c4-7deb73b7e11d" (UID: "663335a7-77d3-4114-b2c4-7deb73b7e11d"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.175355 4687 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/663335a7-77d3-4114-b2c4-7deb73b7e11d-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.175387 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vktm9\" (UniqueName: \"kubernetes.io/projected/663335a7-77d3-4114-b2c4-7deb73b7e11d-kube-api-access-vktm9\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.175396 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/663335a7-77d3-4114-b2c4-7deb73b7e11d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.175405 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/663335a7-77d3-4114-b2c4-7deb73b7e11d-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.671901 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"663335a7-77d3-4114-b2c4-7deb73b7e11d","Type":"ContainerDied","Data":"320cde6345c972701d78c75031a5b3ca96edefb530aa826b8f8950f1be43aa25"} Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.671953 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.671962 4687 scope.go:117] "RemoveContainer" containerID="62444ed88a14d68e2d69dc116fddc906b095f8d99bbbe82c863affa9c5a51108" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.706339 4687 scope.go:117] "RemoveContainer" containerID="1331baeb55abad14196e470d7b417e6be7d817595861f55e802c6414ea65d7b9" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.708574 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.729414 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.750440 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="663335a7-77d3-4114-b2c4-7deb73b7e11d" path="/var/lib/kubelet/pods/663335a7-77d3-4114-b2c4-7deb73b7e11d/volumes" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.751337 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:26:03 crc kubenswrapper[4687]: E1125 09:26:03.751813 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="663335a7-77d3-4114-b2c4-7deb73b7e11d" containerName="nova-metadata-metadata" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.751919 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="663335a7-77d3-4114-b2c4-7deb73b7e11d" containerName="nova-metadata-metadata" Nov 25 09:26:03 crc kubenswrapper[4687]: E1125 09:26:03.752011 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="663335a7-77d3-4114-b2c4-7deb73b7e11d" containerName="nova-metadata-log" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.752084 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="663335a7-77d3-4114-b2c4-7deb73b7e11d" containerName="nova-metadata-log" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.752354 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="663335a7-77d3-4114-b2c4-7deb73b7e11d" containerName="nova-metadata-log" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.752441 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="663335a7-77d3-4114-b2c4-7deb73b7e11d" containerName="nova-metadata-metadata" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.753937 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.757729 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.757809 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.777338 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.888092 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/28054f13-f14d-47dd-a07f-2e56cd710565-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"28054f13-f14d-47dd-a07f-2e56cd710565\") " pod="openstack/nova-metadata-0" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.888276 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28054f13-f14d-47dd-a07f-2e56cd710565-config-data\") pod \"nova-metadata-0\" (UID: \"28054f13-f14d-47dd-a07f-2e56cd710565\") " pod="openstack/nova-metadata-0" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.888338 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28054f13-f14d-47dd-a07f-2e56cd710565-logs\") pod \"nova-metadata-0\" (UID: \"28054f13-f14d-47dd-a07f-2e56cd710565\") " pod="openstack/nova-metadata-0" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.888479 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xh78d\" (UniqueName: \"kubernetes.io/projected/28054f13-f14d-47dd-a07f-2e56cd710565-kube-api-access-xh78d\") pod \"nova-metadata-0\" (UID: \"28054f13-f14d-47dd-a07f-2e56cd710565\") " pod="openstack/nova-metadata-0" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.888639 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28054f13-f14d-47dd-a07f-2e56cd710565-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"28054f13-f14d-47dd-a07f-2e56cd710565\") " pod="openstack/nova-metadata-0" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.991035 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28054f13-f14d-47dd-a07f-2e56cd710565-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"28054f13-f14d-47dd-a07f-2e56cd710565\") " pod="openstack/nova-metadata-0" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.991753 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/28054f13-f14d-47dd-a07f-2e56cd710565-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"28054f13-f14d-47dd-a07f-2e56cd710565\") " pod="openstack/nova-metadata-0" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.991968 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28054f13-f14d-47dd-a07f-2e56cd710565-config-data\") pod \"nova-metadata-0\" (UID: \"28054f13-f14d-47dd-a07f-2e56cd710565\") " 
pod="openstack/nova-metadata-0" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.992113 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28054f13-f14d-47dd-a07f-2e56cd710565-logs\") pod \"nova-metadata-0\" (UID: \"28054f13-f14d-47dd-a07f-2e56cd710565\") " pod="openstack/nova-metadata-0" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.992272 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xh78d\" (UniqueName: \"kubernetes.io/projected/28054f13-f14d-47dd-a07f-2e56cd710565-kube-api-access-xh78d\") pod \"nova-metadata-0\" (UID: \"28054f13-f14d-47dd-a07f-2e56cd710565\") " pod="openstack/nova-metadata-0" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.992440 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28054f13-f14d-47dd-a07f-2e56cd710565-logs\") pod \"nova-metadata-0\" (UID: \"28054f13-f14d-47dd-a07f-2e56cd710565\") " pod="openstack/nova-metadata-0" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.995351 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/28054f13-f14d-47dd-a07f-2e56cd710565-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"28054f13-f14d-47dd-a07f-2e56cd710565\") " pod="openstack/nova-metadata-0" Nov 25 09:26:03 crc kubenswrapper[4687]: I1125 09:26:03.995919 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28054f13-f14d-47dd-a07f-2e56cd710565-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"28054f13-f14d-47dd-a07f-2e56cd710565\") " pod="openstack/nova-metadata-0" Nov 25 09:26:04 crc kubenswrapper[4687]: I1125 09:26:04.006579 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28054f13-f14d-47dd-a07f-2e56cd710565-config-data\") pod \"nova-metadata-0\" (UID: \"28054f13-f14d-47dd-a07f-2e56cd710565\") " pod="openstack/nova-metadata-0" Nov 25 09:26:04 crc kubenswrapper[4687]: I1125 09:26:04.013887 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xh78d\" (UniqueName: \"kubernetes.io/projected/28054f13-f14d-47dd-a07f-2e56cd710565-kube-api-access-xh78d\") pod \"nova-metadata-0\" (UID: \"28054f13-f14d-47dd-a07f-2e56cd710565\") " pod="openstack/nova-metadata-0" Nov 25 09:26:04 crc kubenswrapper[4687]: I1125 09:26:04.081627 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:26:04 crc kubenswrapper[4687]: I1125 09:26:04.584344 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:26:04 crc kubenswrapper[4687]: I1125 09:26:04.683404 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"28054f13-f14d-47dd-a07f-2e56cd710565","Type":"ContainerStarted","Data":"cc73b91f42948b351f1aebb900747d685668663f304e0e523957a50a695f5d0b"} Nov 25 09:26:05 crc kubenswrapper[4687]: I1125 09:26:05.695877 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"28054f13-f14d-47dd-a07f-2e56cd710565","Type":"ContainerStarted","Data":"cd62629ff2c52927b6b223eaae4bd08e23b07215c20a8f779c8b1e77528d6fcc"} Nov 25 09:26:05 crc kubenswrapper[4687]: I1125 09:26:05.696296 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"28054f13-f14d-47dd-a07f-2e56cd710565","Type":"ContainerStarted","Data":"e53c452fcf0ac43dce04277fc4b040e43f63c391f3377dbd4e9d408bbd7c077d"} Nov 25 09:26:05 crc kubenswrapper[4687]: I1125 09:26:05.723188 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.723163443 podStartE2EDuration="2.723163443s" podCreationTimestamp="2025-11-25 09:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:26:05.715317748 +0000 UTC m=+1360.768957476" watchObservedRunningTime="2025-11-25 09:26:05.723163443 +0000 UTC m=+1360.776803181" Nov 25 09:26:06 crc kubenswrapper[4687]: I1125 09:26:06.021771 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 25 09:26:09 crc kubenswrapper[4687]: I1125 09:26:09.082836 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 09:26:09 crc kubenswrapper[4687]: I1125 09:26:09.083483 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 09:26:10 crc kubenswrapper[4687]: I1125 09:26:10.145664 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 09:26:10 crc kubenswrapper[4687]: I1125 09:26:10.145749 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 09:26:11 crc kubenswrapper[4687]: I1125 09:26:11.020644 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 25 09:26:11 crc kubenswrapper[4687]: I1125 09:26:11.073926 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 25 09:26:11 crc kubenswrapper[4687]: I1125 09:26:11.159759 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="8ef7fc72-708b-4994-9ced-44ec353121fc" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.199:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 09:26:11 crc kubenswrapper[4687]: I1125 09:26:11.159858 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="8ef7fc72-708b-4994-9ced-44ec353121fc" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.199:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting 
headers)" Nov 25 09:26:11 crc kubenswrapper[4687]: I1125 09:26:11.782046 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 25 09:26:14 crc kubenswrapper[4687]: I1125 09:26:14.082887 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 09:26:14 crc kubenswrapper[4687]: I1125 09:26:14.083230 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 09:26:15 crc kubenswrapper[4687]: I1125 09:26:15.095656 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="28054f13-f14d-47dd-a07f-2e56cd710565" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.201:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 09:26:15 crc kubenswrapper[4687]: I1125 09:26:15.095723 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="28054f13-f14d-47dd-a07f-2e56cd710565" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.201:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 09:26:19 crc kubenswrapper[4687]: I1125 09:26:19.898468 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 09:26:20 crc kubenswrapper[4687]: I1125 09:26:20.152986 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 09:26:20 crc kubenswrapper[4687]: I1125 09:26:20.153335 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 09:26:20 crc kubenswrapper[4687]: I1125 09:26:20.158269 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 09:26:20 crc kubenswrapper[4687]: I1125 09:26:20.158596 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 09:26:20 crc kubenswrapper[4687]: I1125 09:26:20.839723 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 09:26:20 crc kubenswrapper[4687]: I1125 09:26:20.846402 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 09:26:24 crc kubenswrapper[4687]: I1125 09:26:24.088101 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 09:26:24 crc kubenswrapper[4687]: I1125 09:26:24.090054 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 09:26:24 crc kubenswrapper[4687]: I1125 09:26:24.093793 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 09:26:24 crc kubenswrapper[4687]: I1125 09:26:24.880788 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 09:26:25 crc kubenswrapper[4687]: I1125 09:26:25.273809 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-gc2hh"] Nov 25 09:26:25 crc kubenswrapper[4687]: I1125 09:26:25.276059 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-gc2hh" Nov 25 09:26:25 crc kubenswrapper[4687]: I1125 09:26:25.292979 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gc2hh"] Nov 25 09:26:25 crc kubenswrapper[4687]: I1125 09:26:25.384350 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b45af2b9-2709-424d-add3-a30bbd8aea7c-catalog-content\") pod \"community-operators-gc2hh\" (UID: \"b45af2b9-2709-424d-add3-a30bbd8aea7c\") " pod="openshift-marketplace/community-operators-gc2hh" Nov 25 09:26:25 crc kubenswrapper[4687]: I1125 09:26:25.384824 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b45af2b9-2709-424d-add3-a30bbd8aea7c-utilities\") pod \"community-operators-gc2hh\" (UID: \"b45af2b9-2709-424d-add3-a30bbd8aea7c\") " pod="openshift-marketplace/community-operators-gc2hh" Nov 25 09:26:25 crc kubenswrapper[4687]: I1125 09:26:25.384850 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mz5bk\" (UniqueName: \"kubernetes.io/projected/b45af2b9-2709-424d-add3-a30bbd8aea7c-kube-api-access-mz5bk\") pod \"community-operators-gc2hh\" (UID: \"b45af2b9-2709-424d-add3-a30bbd8aea7c\") " pod="openshift-marketplace/community-operators-gc2hh" Nov 25 09:26:25 crc kubenswrapper[4687]: I1125 09:26:25.486153 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b45af2b9-2709-424d-add3-a30bbd8aea7c-catalog-content\") pod \"community-operators-gc2hh\" (UID: \"b45af2b9-2709-424d-add3-a30bbd8aea7c\") " pod="openshift-marketplace/community-operators-gc2hh" Nov 25 09:26:25 crc kubenswrapper[4687]: I1125 09:26:25.486283 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b45af2b9-2709-424d-add3-a30bbd8aea7c-utilities\") pod \"community-operators-gc2hh\" (UID: \"b45af2b9-2709-424d-add3-a30bbd8aea7c\") " pod="openshift-marketplace/community-operators-gc2hh" Nov 25 09:26:25 crc kubenswrapper[4687]: I1125 09:26:25.486307 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mz5bk\" (UniqueName: \"kubernetes.io/projected/b45af2b9-2709-424d-add3-a30bbd8aea7c-kube-api-access-mz5bk\") pod \"community-operators-gc2hh\" (UID: \"b45af2b9-2709-424d-add3-a30bbd8aea7c\") " pod="openshift-marketplace/community-operators-gc2hh" Nov 25 09:26:25 crc kubenswrapper[4687]: I1125 09:26:25.486672 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b45af2b9-2709-424d-add3-a30bbd8aea7c-catalog-content\") pod \"community-operators-gc2hh\" (UID: \"b45af2b9-2709-424d-add3-a30bbd8aea7c\") " pod="openshift-marketplace/community-operators-gc2hh" Nov 25 09:26:25 crc kubenswrapper[4687]: I1125 09:26:25.486810 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b45af2b9-2709-424d-add3-a30bbd8aea7c-utilities\") pod \"community-operators-gc2hh\" (UID: \"b45af2b9-2709-424d-add3-a30bbd8aea7c\") " pod="openshift-marketplace/community-operators-gc2hh" Nov 25 09:26:25 crc kubenswrapper[4687]: I1125 09:26:25.511449 4687 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-mz5bk\" (UniqueName: \"kubernetes.io/projected/b45af2b9-2709-424d-add3-a30bbd8aea7c-kube-api-access-mz5bk\") pod \"community-operators-gc2hh\" (UID: \"b45af2b9-2709-424d-add3-a30bbd8aea7c\") " pod="openshift-marketplace/community-operators-gc2hh" Nov 25 09:26:25 crc kubenswrapper[4687]: I1125 09:26:25.602421 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gc2hh" Nov 25 09:26:26 crc kubenswrapper[4687]: I1125 09:26:26.146213 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gc2hh"] Nov 25 09:26:26 crc kubenswrapper[4687]: W1125 09:26:26.146854 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb45af2b9_2709_424d_add3_a30bbd8aea7c.slice/crio-c72e000c2d2bb5907065db2ef8a2711d3b0241d92bad9f425d03cec8f7313a42 WatchSource:0}: Error finding container c72e000c2d2bb5907065db2ef8a2711d3b0241d92bad9f425d03cec8f7313a42: Status 404 returned error can't find the container with id c72e000c2d2bb5907065db2ef8a2711d3b0241d92bad9f425d03cec8f7313a42 Nov 25 09:26:26 crc kubenswrapper[4687]: I1125 09:26:26.895474 4687 generic.go:334] "Generic (PLEG): container finished" podID="b45af2b9-2709-424d-add3-a30bbd8aea7c" containerID="f5558190d967a43fc6eb332592fe126f5ef22e7381700dc3c7f966ddee3d79b5" exitCode=0 Nov 25 09:26:26 crc kubenswrapper[4687]: I1125 09:26:26.895540 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gc2hh" event={"ID":"b45af2b9-2709-424d-add3-a30bbd8aea7c","Type":"ContainerDied","Data":"f5558190d967a43fc6eb332592fe126f5ef22e7381700dc3c7f966ddee3d79b5"} Nov 25 09:26:26 crc kubenswrapper[4687]: I1125 09:26:26.895599 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gc2hh" event={"ID":"b45af2b9-2709-424d-add3-a30bbd8aea7c","Type":"ContainerStarted","Data":"c72e000c2d2bb5907065db2ef8a2711d3b0241d92bad9f425d03cec8f7313a42"} Nov 25 09:26:26 crc kubenswrapper[4687]: I1125 09:26:26.897245 4687 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:26:29 crc kubenswrapper[4687]: I1125 09:26:29.465636 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7wngm"] Nov 25 09:26:29 crc kubenswrapper[4687]: I1125 09:26:29.468121 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7wngm" Nov 25 09:26:29 crc kubenswrapper[4687]: I1125 09:26:29.481084 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7wngm"] Nov 25 09:26:29 crc kubenswrapper[4687]: I1125 09:26:29.585750 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2409926b-0e0e-4de9-87f9-b61caf4f7c31-utilities\") pod \"redhat-operators-7wngm\" (UID: \"2409926b-0e0e-4de9-87f9-b61caf4f7c31\") " pod="openshift-marketplace/redhat-operators-7wngm" Nov 25 09:26:29 crc kubenswrapper[4687]: I1125 09:26:29.585817 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8jl8\" (UniqueName: \"kubernetes.io/projected/2409926b-0e0e-4de9-87f9-b61caf4f7c31-kube-api-access-t8jl8\") pod \"redhat-operators-7wngm\" (UID: \"2409926b-0e0e-4de9-87f9-b61caf4f7c31\") " pod="openshift-marketplace/redhat-operators-7wngm" Nov 25 09:26:29 crc kubenswrapper[4687]: I1125 09:26:29.585998 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2409926b-0e0e-4de9-87f9-b61caf4f7c31-catalog-content\") pod \"redhat-operators-7wngm\" (UID: \"2409926b-0e0e-4de9-87f9-b61caf4f7c31\") " pod="openshift-marketplace/redhat-operators-7wngm" Nov 25 09:26:29 crc kubenswrapper[4687]: I1125 09:26:29.687583 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2409926b-0e0e-4de9-87f9-b61caf4f7c31-utilities\") pod \"redhat-operators-7wngm\" (UID: \"2409926b-0e0e-4de9-87f9-b61caf4f7c31\") " pod="openshift-marketplace/redhat-operators-7wngm" Nov 25 09:26:29 crc kubenswrapper[4687]: I1125 09:26:29.687664 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8jl8\" (UniqueName: \"kubernetes.io/projected/2409926b-0e0e-4de9-87f9-b61caf4f7c31-kube-api-access-t8jl8\") pod \"redhat-operators-7wngm\" (UID: \"2409926b-0e0e-4de9-87f9-b61caf4f7c31\") " pod="openshift-marketplace/redhat-operators-7wngm" Nov 25 09:26:29 crc kubenswrapper[4687]: I1125 09:26:29.687723 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2409926b-0e0e-4de9-87f9-b61caf4f7c31-catalog-content\") pod \"redhat-operators-7wngm\" (UID: \"2409926b-0e0e-4de9-87f9-b61caf4f7c31\") " pod="openshift-marketplace/redhat-operators-7wngm" Nov 25 09:26:29 crc kubenswrapper[4687]: I1125 09:26:29.688113 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2409926b-0e0e-4de9-87f9-b61caf4f7c31-utilities\") pod \"redhat-operators-7wngm\" (UID: \"2409926b-0e0e-4de9-87f9-b61caf4f7c31\") " pod="openshift-marketplace/redhat-operators-7wngm" Nov 25 09:26:29 crc kubenswrapper[4687]: I1125 09:26:29.688247 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2409926b-0e0e-4de9-87f9-b61caf4f7c31-catalog-content\") pod \"redhat-operators-7wngm\" (UID: \"2409926b-0e0e-4de9-87f9-b61caf4f7c31\") " pod="openshift-marketplace/redhat-operators-7wngm" Nov 25 09:26:29 crc kubenswrapper[4687]: I1125 09:26:29.707455 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-t8jl8\" (UniqueName: \"kubernetes.io/projected/2409926b-0e0e-4de9-87f9-b61caf4f7c31-kube-api-access-t8jl8\") pod \"redhat-operators-7wngm\" (UID: \"2409926b-0e0e-4de9-87f9-b61caf4f7c31\") " pod="openshift-marketplace/redhat-operators-7wngm" Nov 25 09:26:29 crc kubenswrapper[4687]: I1125 09:26:29.788437 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7wngm" Nov 25 09:26:29 crc kubenswrapper[4687]: I1125 09:26:29.922610 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gc2hh" event={"ID":"b45af2b9-2709-424d-add3-a30bbd8aea7c","Type":"ContainerStarted","Data":"80aaef569dd0c18f55a4ce7a932c3f7dcb86ecc1634eff3ec3bb1ebc0e9ed8c4"} Nov 25 09:26:30 crc kubenswrapper[4687]: I1125 09:26:30.887065 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7wngm"] Nov 25 09:26:30 crc kubenswrapper[4687]: I1125 09:26:30.937160 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7wngm" event={"ID":"2409926b-0e0e-4de9-87f9-b61caf4f7c31","Type":"ContainerStarted","Data":"38fe1164305419b014df447a79e4cda675aefa3bc6f62ae85511f91713dd2f50"} Nov 25 09:26:30 crc kubenswrapper[4687]: I1125 09:26:30.940699 4687 generic.go:334] "Generic (PLEG): container finished" podID="b45af2b9-2709-424d-add3-a30bbd8aea7c" containerID="80aaef569dd0c18f55a4ce7a932c3f7dcb86ecc1634eff3ec3bb1ebc0e9ed8c4" exitCode=0 Nov 25 09:26:30 crc kubenswrapper[4687]: I1125 09:26:30.940736 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gc2hh" event={"ID":"b45af2b9-2709-424d-add3-a30bbd8aea7c","Type":"ContainerDied","Data":"80aaef569dd0c18f55a4ce7a932c3f7dcb86ecc1634eff3ec3bb1ebc0e9ed8c4"} Nov 25 09:26:33 crc kubenswrapper[4687]: I1125 09:26:33.972604 4687 generic.go:334] "Generic (PLEG): container finished" podID="2409926b-0e0e-4de9-87f9-b61caf4f7c31" containerID="07954621bed3c080329d54bea24740eed03ca600a79fd6a38521171afb83fdf2" exitCode=0 Nov 25 09:26:33 crc kubenswrapper[4687]: I1125 09:26:33.973037 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7wngm" event={"ID":"2409926b-0e0e-4de9-87f9-b61caf4f7c31","Type":"ContainerDied","Data":"07954621bed3c080329d54bea24740eed03ca600a79fd6a38521171afb83fdf2"} Nov 25 09:26:34 crc kubenswrapper[4687]: I1125 09:26:34.064268 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 09:26:35 crc kubenswrapper[4687]: I1125 09:26:35.424237 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 09:26:39 crc kubenswrapper[4687]: I1125 09:26:39.159476 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" containerName="rabbitmq" containerID="cri-o://71e762c5d8e711770e4fd5b25b4c1dd54aa6f6da8ce4400c2e1b5875f3601c1b" gracePeriod=604795 Nov 25 09:26:39 crc kubenswrapper[4687]: I1125 09:26:39.421074 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" containerName="rabbitmq" containerID="cri-o://32e97fd39f4e2ee10c2ab632c1828fb5f358d328844833af75706ed6838b9409" gracePeriod=604797 Nov 25 09:26:43 crc kubenswrapper[4687]: I1125 09:26:43.059664 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-gc2hh" event={"ID":"b45af2b9-2709-424d-add3-a30bbd8aea7c","Type":"ContainerStarted","Data":"a39d6635be424b9f4518b2eb71b0c95f2c3cf07a964996420d6fa062da141a40"} Nov 25 09:26:43 crc kubenswrapper[4687]: I1125 09:26:43.078272 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-gc2hh" podStartSLOduration=4.746968392 podStartE2EDuration="18.078257728s" podCreationTimestamp="2025-11-25 09:26:25 +0000 UTC" firstStartedPulling="2025-11-25 09:26:26.897032242 +0000 UTC m=+1381.950671960" lastFinishedPulling="2025-11-25 09:26:40.228321558 +0000 UTC m=+1395.281961296" observedRunningTime="2025-11-25 09:26:43.077400185 +0000 UTC m=+1398.131039913" watchObservedRunningTime="2025-11-25 09:26:43.078257728 +0000 UTC m=+1398.131897446" Nov 25 09:26:43 crc kubenswrapper[4687]: I1125 09:26:43.427150 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.98:5671: connect: connection refused" Nov 25 09:26:43 crc kubenswrapper[4687]: I1125 09:26:43.740004 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.99:5671: connect: connection refused" Nov 25 09:26:45 crc kubenswrapper[4687]: I1125 09:26:45.081048 4687 generic.go:334] "Generic (PLEG): container finished" podID="2409926b-0e0e-4de9-87f9-b61caf4f7c31" containerID="ace083961e43178241bfbb2f74fb84f84c05d2ed804d9dd58c3f53bde66aab75" exitCode=0 Nov 25 09:26:45 crc kubenswrapper[4687]: I1125 09:26:45.081095 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7wngm" event={"ID":"2409926b-0e0e-4de9-87f9-b61caf4f7c31","Type":"ContainerDied","Data":"ace083961e43178241bfbb2f74fb84f84c05d2ed804d9dd58c3f53bde66aab75"} Nov 25 09:26:45 crc kubenswrapper[4687]: I1125 09:26:45.602542 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-gc2hh" Nov 25 09:26:45 crc kubenswrapper[4687]: I1125 09:26:45.602906 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-gc2hh" Nov 25 09:26:45 crc kubenswrapper[4687]: I1125 09:26:45.647912 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-gc2hh" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.553615 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-d558885bc-6bz7h"] Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.555716 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.558407 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.566380 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-6bz7h"] Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.704850 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-dns-swift-storage-0\") pod \"dnsmasq-dns-d558885bc-6bz7h\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.704981 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-ovsdbserver-nb\") pod \"dnsmasq-dns-d558885bc-6bz7h\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.705025 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2mtt\" (UniqueName: \"kubernetes.io/projected/b864c704-e14e-4b2c-9da3-e93feb535259-kube-api-access-p2mtt\") pod \"dnsmasq-dns-d558885bc-6bz7h\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.705053 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-dns-svc\") pod \"dnsmasq-dns-d558885bc-6bz7h\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.705078 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-ovsdbserver-sb\") pod \"dnsmasq-dns-d558885bc-6bz7h\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.705192 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-config\") pod \"dnsmasq-dns-d558885bc-6bz7h\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.705244 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-openstack-edpm-ipam\") pod \"dnsmasq-dns-d558885bc-6bz7h\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.807447 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-config\") pod \"dnsmasq-dns-d558885bc-6bz7h\" (UID: 
\"b864c704-e14e-4b2c-9da3-e93feb535259\") " pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.807568 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-openstack-edpm-ipam\") pod \"dnsmasq-dns-d558885bc-6bz7h\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.807616 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-dns-swift-storage-0\") pod \"dnsmasq-dns-d558885bc-6bz7h\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.807690 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-ovsdbserver-nb\") pod \"dnsmasq-dns-d558885bc-6bz7h\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.807736 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2mtt\" (UniqueName: \"kubernetes.io/projected/b864c704-e14e-4b2c-9da3-e93feb535259-kube-api-access-p2mtt\") pod \"dnsmasq-dns-d558885bc-6bz7h\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.807772 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-dns-svc\") pod \"dnsmasq-dns-d558885bc-6bz7h\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.807803 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-ovsdbserver-sb\") pod \"dnsmasq-dns-d558885bc-6bz7h\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.808649 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-openstack-edpm-ipam\") pod \"dnsmasq-dns-d558885bc-6bz7h\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.808676 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-dns-svc\") pod \"dnsmasq-dns-d558885bc-6bz7h\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.808702 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-dns-swift-storage-0\") pod \"dnsmasq-dns-d558885bc-6bz7h\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " 
pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.808804 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-ovsdbserver-nb\") pod \"dnsmasq-dns-d558885bc-6bz7h\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.808923 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-ovsdbserver-sb\") pod \"dnsmasq-dns-d558885bc-6bz7h\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.809085 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-config\") pod \"dnsmasq-dns-d558885bc-6bz7h\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.832176 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p2mtt\" (UniqueName: \"kubernetes.io/projected/b864c704-e14e-4b2c-9da3-e93feb535259-kube-api-access-p2mtt\") pod \"dnsmasq-dns-d558885bc-6bz7h\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:51 crc kubenswrapper[4687]: I1125 09:26:51.884479 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:52 crc kubenswrapper[4687]: I1125 09:26:52.185703 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7wngm" event={"ID":"2409926b-0e0e-4de9-87f9-b61caf4f7c31","Type":"ContainerStarted","Data":"f54fb4972ad6a56153788dcddd9eac5e1054c767075f4b9a576b4a0cfb742a9b"} Nov 25 09:26:52 crc kubenswrapper[4687]: I1125 09:26:52.336971 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-6bz7h"] Nov 25 09:26:53 crc kubenswrapper[4687]: I1125 09:26:53.196352 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-6bz7h" event={"ID":"b864c704-e14e-4b2c-9da3-e93feb535259","Type":"ContainerStarted","Data":"72abcfc8ad8c4ca4f7fae293a7355a64db88d9913a86d25fc0a2b49505b123b5"} Nov 25 09:26:53 crc kubenswrapper[4687]: I1125 09:26:53.196848 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-6bz7h" event={"ID":"b864c704-e14e-4b2c-9da3-e93feb535259","Type":"ContainerStarted","Data":"5272315b1d7738b6e496b89ed45cd67adce8a65f345ea3bea5865359c55abaad"} Nov 25 09:26:53 crc kubenswrapper[4687]: I1125 09:26:53.224579 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7wngm" podStartSLOduration=7.19369964 podStartE2EDuration="24.224555954s" podCreationTimestamp="2025-11-25 09:26:29 +0000 UTC" firstStartedPulling="2025-11-25 09:26:33.975459018 +0000 UTC m=+1389.029098756" lastFinishedPulling="2025-11-25 09:26:51.006315342 +0000 UTC m=+1406.059955070" observedRunningTime="2025-11-25 09:26:53.21413043 +0000 UTC m=+1408.267770148" watchObservedRunningTime="2025-11-25 09:26:53.224555954 +0000 UTC m=+1408.278195672" Nov 25 09:26:53 crc kubenswrapper[4687]: I1125 09:26:53.427472 4687 
prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.98:5671: connect: connection refused" Nov 25 09:26:53 crc kubenswrapper[4687]: I1125 09:26:53.740104 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.99:5671: connect: connection refused" Nov 25 09:26:53 crc kubenswrapper[4687]: I1125 09:26:53.844394 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:26:53 crc kubenswrapper[4687]: I1125 09:26:53.844486 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:26:54 crc kubenswrapper[4687]: I1125 09:26:54.210943 4687 generic.go:334] "Generic (PLEG): container finished" podID="abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" containerID="32e97fd39f4e2ee10c2ab632c1828fb5f358d328844833af75706ed6838b9409" exitCode=0 Nov 25 09:26:54 crc kubenswrapper[4687]: I1125 09:26:54.210997 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7","Type":"ContainerDied","Data":"32e97fd39f4e2ee10c2ab632c1828fb5f358d328844833af75706ed6838b9409"} Nov 25 09:26:54 crc kubenswrapper[4687]: I1125 09:26:54.213596 4687 generic.go:334] "Generic (PLEG): container finished" podID="b864c704-e14e-4b2c-9da3-e93feb535259" containerID="72abcfc8ad8c4ca4f7fae293a7355a64db88d9913a86d25fc0a2b49505b123b5" exitCode=0 Nov 25 09:26:54 crc kubenswrapper[4687]: I1125 09:26:54.213664 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-6bz7h" event={"ID":"b864c704-e14e-4b2c-9da3-e93feb535259","Type":"ContainerDied","Data":"72abcfc8ad8c4ca4f7fae293a7355a64db88d9913a86d25fc0a2b49505b123b5"} Nov 25 09:26:54 crc kubenswrapper[4687]: I1125 09:26:54.925145 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.068941 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-confd\") pod \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.069045 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-plugins\") pod \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.069191 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-erlang-cookie\") pod \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.069758 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" (UID: "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.069783 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" (UID: "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7"). InnerVolumeSpecName "rabbitmq-plugins". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.069841 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.069877 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-server-conf\") pod \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.070158 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-pod-info\") pod \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.070195 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-tls\") pod \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.070217 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-plugins-conf\") pod \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.070275 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-config-data\") pod \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.070317 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9v9wz\" (UniqueName: \"kubernetes.io/projected/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-kube-api-access-9v9wz\") pod \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.070383 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-erlang-cookie-secret\") pod \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\" (UID: \"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7\") " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.070919 4687 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.070947 4687 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.071966 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" (UID: "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.074133 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "persistence") pod "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" (UID: "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.074845 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" (UID: "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.075428 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-pod-info" (OuterVolumeSpecName: "pod-info") pod "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" (UID: "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.076875 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" (UID: "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.080686 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-kube-api-access-9v9wz" (OuterVolumeSpecName: "kube-api-access-9v9wz") pod "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" (UID: "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7"). InnerVolumeSpecName "kube-api-access-9v9wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.104874 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-config-data" (OuterVolumeSpecName: "config-data") pod "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" (UID: "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.126376 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-server-conf" (OuterVolumeSpecName: "server-conf") pod "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" (UID: "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.173207 4687 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.173246 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.173256 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9v9wz\" (UniqueName: \"kubernetes.io/projected/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-kube-api-access-9v9wz\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.173269 4687 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.173299 4687 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.173308 4687 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-server-conf\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.173317 4687 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-pod-info\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.173326 4687 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.186907 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" (UID: "abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.202161 4687 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.235672 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7","Type":"ContainerDied","Data":"ac983ac144d32c67ead6359f9ee51ca9dd33332ea2af263edb41894360eb49c0"} Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.236590 4687 scope.go:117] "RemoveContainer" containerID="32e97fd39f4e2ee10c2ab632c1828fb5f358d328844833af75706ed6838b9409" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.236712 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.242835 4687 generic.go:334] "Generic (PLEG): container finished" podID="9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" containerID="71e762c5d8e711770e4fd5b25b4c1dd54aa6f6da8ce4400c2e1b5875f3601c1b" exitCode=0 Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.242927 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee","Type":"ContainerDied","Data":"71e762c5d8e711770e4fd5b25b4c1dd54aa6f6da8ce4400c2e1b5875f3601c1b"} Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.245173 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-6bz7h" event={"ID":"b864c704-e14e-4b2c-9da3-e93feb535259","Type":"ContainerStarted","Data":"f1265df310ed4bf60d150ea56f50e9b96e9205ff332736b20cf4078997f754cc"} Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.269273 4687 scope.go:117] "RemoveContainer" containerID="c595258a961b41004a924e3c4310ee9e02391fc1013460452a08f5fb21a05e95" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.275933 4687 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.275960 4687 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.290118 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.299685 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.319510 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 09:26:55 crc kubenswrapper[4687]: E1125 09:26:55.320647 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" containerName="setup-container" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.320689 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" containerName="setup-container" Nov 25 09:26:55 crc kubenswrapper[4687]: E1125 09:26:55.320708 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" containerName="rabbitmq" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.320713 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" containerName="rabbitmq" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.320907 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" containerName="rabbitmq" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.322267 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.326762 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.326969 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.328466 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-94z4c" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.329063 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.329316 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.329471 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.329621 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.337006 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.483827 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skdmz\" (UniqueName: \"kubernetes.io/projected/0f0fb06f-00e1-471a-855b-88f34608ca01-kube-api-access-skdmz\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.484433 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0f0fb06f-00e1-471a-855b-88f34608ca01-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.484490 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0f0fb06f-00e1-471a-855b-88f34608ca01-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.487670 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0f0fb06f-00e1-471a-855b-88f34608ca01-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.487809 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.487857 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0f0fb06f-00e1-471a-855b-88f34608ca01-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.487923 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0f0fb06f-00e1-471a-855b-88f34608ca01-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.488005 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0f0fb06f-00e1-471a-855b-88f34608ca01-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.488036 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0f0fb06f-00e1-471a-855b-88f34608ca01-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.488098 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0f0fb06f-00e1-471a-855b-88f34608ca01-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.488153 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0f0fb06f-00e1-471a-855b-88f34608ca01-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.590487 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skdmz\" (UniqueName: \"kubernetes.io/projected/0f0fb06f-00e1-471a-855b-88f34608ca01-kube-api-access-skdmz\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.590655 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0f0fb06f-00e1-471a-855b-88f34608ca01-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.590729 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0f0fb06f-00e1-471a-855b-88f34608ca01-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.590778 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/configmap/0f0fb06f-00e1-471a-855b-88f34608ca01-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.590852 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.590900 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0f0fb06f-00e1-471a-855b-88f34608ca01-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.590941 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0f0fb06f-00e1-471a-855b-88f34608ca01-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.591003 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0f0fb06f-00e1-471a-855b-88f34608ca01-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.591046 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0f0fb06f-00e1-471a-855b-88f34608ca01-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.591104 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0f0fb06f-00e1-471a-855b-88f34608ca01-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.591170 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0f0fb06f-00e1-471a-855b-88f34608ca01-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.591964 4687 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.592126 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0f0fb06f-00e1-471a-855b-88f34608ca01-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" 
(UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.592381 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0f0fb06f-00e1-471a-855b-88f34608ca01-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.595443 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0f0fb06f-00e1-471a-855b-88f34608ca01-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.595966 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0f0fb06f-00e1-471a-855b-88f34608ca01-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.597743 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0f0fb06f-00e1-471a-855b-88f34608ca01-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.597924 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0f0fb06f-00e1-471a-855b-88f34608ca01-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.598208 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0f0fb06f-00e1-471a-855b-88f34608ca01-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.598303 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0f0fb06f-00e1-471a-855b-88f34608ca01-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.601492 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0f0fb06f-00e1-471a-855b-88f34608ca01-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.608925 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skdmz\" (UniqueName: \"kubernetes.io/projected/0f0fb06f-00e1-471a-855b-88f34608ca01-kube-api-access-skdmz\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.655442 4687 util.go:48] "No ready sandbox for pod can be 
found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.658778 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-gc2hh" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.660548 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f0fb06f-00e1-471a-855b-88f34608ca01\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.701815 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.776690 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7" path="/var/lib/kubelet/pods/abf2ba7b-04ee-4461-9aa5-0d1ba9fa1ec7/volumes" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.806121 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-klxh2\" (UniqueName: \"kubernetes.io/projected/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-kube-api-access-klxh2\") pod \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.806183 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-tls\") pod \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.806210 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-erlang-cookie-secret\") pod \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.806237 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-plugins\") pod \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.806270 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-config-data\") pod \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.806313 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-erlang-cookie\") pod \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.806356 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-confd\") pod \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " Nov 25 
09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.806416 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-plugins-conf\") pod \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.806476 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.806551 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-pod-info\") pod \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.806594 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-server-conf\") pod \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\" (UID: \"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee\") " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.807430 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" (UID: "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.807490 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" (UID: "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.807700 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" (UID: "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.812573 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-kube-api-access-klxh2" (OuterVolumeSpecName: "kube-api-access-klxh2") pod "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" (UID: "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee"). InnerVolumeSpecName "kube-api-access-klxh2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.813116 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-pod-info" (OuterVolumeSpecName: "pod-info") pod "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" (UID: "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.813851 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" (UID: "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.815067 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" (UID: "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.816316 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" (UID: "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.859609 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-config-data" (OuterVolumeSpecName: "config-data") pod "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" (UID: "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.877450 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-server-conf" (OuterVolumeSpecName: "server-conf") pod "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" (UID: "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.920436 4687 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.920473 4687 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.920528 4687 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.921770 4687 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-pod-info\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.921809 4687 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-server-conf\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.921822 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-klxh2\" (UniqueName: \"kubernetes.io/projected/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-kube-api-access-klxh2\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.921835 4687 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.921846 4687 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.921857 4687 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.921868 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.935741 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" (UID: "9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:26:55 crc kubenswrapper[4687]: I1125 09:26:55.949705 4687 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.023779 4687 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.023801 4687 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.233693 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 09:26:56 crc kubenswrapper[4687]: W1125 09:26:56.233853 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0f0fb06f_00e1_471a_855b_88f34608ca01.slice/crio-4e9d201a6b93b20c3a0588915fc0b7eb3f935794762f18d6d9c09ffd71a4f0fb WatchSource:0}: Error finding container 4e9d201a6b93b20c3a0588915fc0b7eb3f935794762f18d6d9c09ffd71a4f0fb: Status 404 returned error can't find the container with id 4e9d201a6b93b20c3a0588915fc0b7eb3f935794762f18d6d9c09ffd71a4f0fb Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.259537 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0f0fb06f-00e1-471a-855b-88f34608ca01","Type":"ContainerStarted","Data":"4e9d201a6b93b20c3a0588915fc0b7eb3f935794762f18d6d9c09ffd71a4f0fb"} Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.265401 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee","Type":"ContainerDied","Data":"5177f22b77c0d87914a05a27143ad7b3889f694436d8cd474f181597bd38c846"} Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.265462 4687 scope.go:117] "RemoveContainer" containerID="71e762c5d8e711770e4fd5b25b4c1dd54aa6f6da8ce4400c2e1b5875f3601c1b" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.265559 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.265488 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.374001 4687 scope.go:117] "RemoveContainer" containerID="9916f15a8da56f333d7b7d285a6679cc365924b75d455e2c7dae8ee65095fcab" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.390443 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-d558885bc-6bz7h" podStartSLOduration=5.390417685 podStartE2EDuration="5.390417685s" podCreationTimestamp="2025-11-25 09:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:26:56.295666231 +0000 UTC m=+1411.349305959" watchObservedRunningTime="2025-11-25 09:26:56.390417685 +0000 UTC m=+1411.444057413" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.401148 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gc2hh"] Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.401364 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-gc2hh" podUID="b45af2b9-2709-424d-add3-a30bbd8aea7c" containerName="registry-server" containerID="cri-o://a39d6635be424b9f4518b2eb71b0c95f2c3cf07a964996420d6fa062da141a40" gracePeriod=2 Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.416217 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.442559 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.484914 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 09:26:56 crc kubenswrapper[4687]: E1125 09:26:56.485312 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" containerName="rabbitmq" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.485324 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" containerName="rabbitmq" Nov 25 09:26:56 crc kubenswrapper[4687]: E1125 09:26:56.485339 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" containerName="setup-container" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.485345 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" containerName="setup-container" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.485545 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" containerName="rabbitmq" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.486501 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.491577 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-mnbpd" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.496046 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.496218 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.496575 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.496766 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.496770 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.497279 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.497943 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.640157 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/244d6f11-290e-4cbe-95b7-04b7555090a9-server-conf\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.640244 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/244d6f11-290e-4cbe-95b7-04b7555090a9-pod-info\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.640278 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/244d6f11-290e-4cbe-95b7-04b7555090a9-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.640327 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.640349 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jrx9\" (UniqueName: \"kubernetes.io/projected/244d6f11-290e-4cbe-95b7-04b7555090a9-kube-api-access-8jrx9\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.640374 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/244d6f11-290e-4cbe-95b7-04b7555090a9-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.640406 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/244d6f11-290e-4cbe-95b7-04b7555090a9-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.640462 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/244d6f11-290e-4cbe-95b7-04b7555090a9-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.640531 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/244d6f11-290e-4cbe-95b7-04b7555090a9-config-data\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.640567 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/244d6f11-290e-4cbe-95b7-04b7555090a9-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.640610 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/244d6f11-290e-4cbe-95b7-04b7555090a9-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.742883 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/244d6f11-290e-4cbe-95b7-04b7555090a9-pod-info\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.742955 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/244d6f11-290e-4cbe-95b7-04b7555090a9-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.743006 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jrx9\" (UniqueName: \"kubernetes.io/projected/244d6f11-290e-4cbe-95b7-04b7555090a9-kube-api-access-8jrx9\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.743036 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " 
pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.743067 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/244d6f11-290e-4cbe-95b7-04b7555090a9-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.743100 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/244d6f11-290e-4cbe-95b7-04b7555090a9-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.743157 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/244d6f11-290e-4cbe-95b7-04b7555090a9-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.743207 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/244d6f11-290e-4cbe-95b7-04b7555090a9-config-data\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.743241 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/244d6f11-290e-4cbe-95b7-04b7555090a9-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.743289 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/244d6f11-290e-4cbe-95b7-04b7555090a9-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.743331 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/244d6f11-290e-4cbe-95b7-04b7555090a9-server-conf\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.743538 4687 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.744385 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/244d6f11-290e-4cbe-95b7-04b7555090a9-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.744598 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/244d6f11-290e-4cbe-95b7-04b7555090a9-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.744896 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/244d6f11-290e-4cbe-95b7-04b7555090a9-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.745055 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/244d6f11-290e-4cbe-95b7-04b7555090a9-config-data\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.745357 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/244d6f11-290e-4cbe-95b7-04b7555090a9-server-conf\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.748925 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/244d6f11-290e-4cbe-95b7-04b7555090a9-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.750287 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/244d6f11-290e-4cbe-95b7-04b7555090a9-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.750666 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/244d6f11-290e-4cbe-95b7-04b7555090a9-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.751273 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/244d6f11-290e-4cbe-95b7-04b7555090a9-pod-info\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.769844 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jrx9\" (UniqueName: \"kubernetes.io/projected/244d6f11-290e-4cbe-95b7-04b7555090a9-kube-api-access-8jrx9\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.773948 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"244d6f11-290e-4cbe-95b7-04b7555090a9\") " pod="openstack/rabbitmq-server-0" Nov 25 09:26:56 crc kubenswrapper[4687]: I1125 09:26:56.815044 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 09:26:57 crc kubenswrapper[4687]: I1125 09:26:57.253267 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 09:26:57 crc kubenswrapper[4687]: I1125 09:26:57.284106 4687 generic.go:334] "Generic (PLEG): container finished" podID="b45af2b9-2709-424d-add3-a30bbd8aea7c" containerID="a39d6635be424b9f4518b2eb71b0c95f2c3cf07a964996420d6fa062da141a40" exitCode=0 Nov 25 09:26:57 crc kubenswrapper[4687]: I1125 09:26:57.284177 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gc2hh" event={"ID":"b45af2b9-2709-424d-add3-a30bbd8aea7c","Type":"ContainerDied","Data":"a39d6635be424b9f4518b2eb71b0c95f2c3cf07a964996420d6fa062da141a40"} Nov 25 09:26:57 crc kubenswrapper[4687]: I1125 09:26:57.287616 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"244d6f11-290e-4cbe-95b7-04b7555090a9","Type":"ContainerStarted","Data":"e40a78e58bb137a9ec611f115c75c0014509a81d555f43c582694c176bc3acf0"} Nov 25 09:26:57 crc kubenswrapper[4687]: I1125 09:26:57.747605 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee" path="/var/lib/kubelet/pods/9ec38193-8d1c-499d-8a5f-ec6f86b6c3ee/volumes" Nov 25 09:26:57 crc kubenswrapper[4687]: I1125 09:26:57.904237 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gc2hh" Nov 25 09:26:58 crc kubenswrapper[4687]: I1125 09:26:58.071169 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b45af2b9-2709-424d-add3-a30bbd8aea7c-catalog-content\") pod \"b45af2b9-2709-424d-add3-a30bbd8aea7c\" (UID: \"b45af2b9-2709-424d-add3-a30bbd8aea7c\") " Nov 25 09:26:58 crc kubenswrapper[4687]: I1125 09:26:58.071221 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mz5bk\" (UniqueName: \"kubernetes.io/projected/b45af2b9-2709-424d-add3-a30bbd8aea7c-kube-api-access-mz5bk\") pod \"b45af2b9-2709-424d-add3-a30bbd8aea7c\" (UID: \"b45af2b9-2709-424d-add3-a30bbd8aea7c\") " Nov 25 09:26:58 crc kubenswrapper[4687]: I1125 09:26:58.071311 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b45af2b9-2709-424d-add3-a30bbd8aea7c-utilities\") pod \"b45af2b9-2709-424d-add3-a30bbd8aea7c\" (UID: \"b45af2b9-2709-424d-add3-a30bbd8aea7c\") " Nov 25 09:26:58 crc kubenswrapper[4687]: I1125 09:26:58.072018 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b45af2b9-2709-424d-add3-a30bbd8aea7c-utilities" (OuterVolumeSpecName: "utilities") pod "b45af2b9-2709-424d-add3-a30bbd8aea7c" (UID: "b45af2b9-2709-424d-add3-a30bbd8aea7c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:26:58 crc kubenswrapper[4687]: I1125 09:26:58.076060 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b45af2b9-2709-424d-add3-a30bbd8aea7c-kube-api-access-mz5bk" (OuterVolumeSpecName: "kube-api-access-mz5bk") pod "b45af2b9-2709-424d-add3-a30bbd8aea7c" (UID: "b45af2b9-2709-424d-add3-a30bbd8aea7c"). InnerVolumeSpecName "kube-api-access-mz5bk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:26:58 crc kubenswrapper[4687]: I1125 09:26:58.142720 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b45af2b9-2709-424d-add3-a30bbd8aea7c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b45af2b9-2709-424d-add3-a30bbd8aea7c" (UID: "b45af2b9-2709-424d-add3-a30bbd8aea7c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:26:58 crc kubenswrapper[4687]: I1125 09:26:58.173611 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b45af2b9-2709-424d-add3-a30bbd8aea7c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:58 crc kubenswrapper[4687]: I1125 09:26:58.173650 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mz5bk\" (UniqueName: \"kubernetes.io/projected/b45af2b9-2709-424d-add3-a30bbd8aea7c-kube-api-access-mz5bk\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:58 crc kubenswrapper[4687]: I1125 09:26:58.173664 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b45af2b9-2709-424d-add3-a30bbd8aea7c-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:26:58 crc kubenswrapper[4687]: I1125 09:26:58.296746 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gc2hh" event={"ID":"b45af2b9-2709-424d-add3-a30bbd8aea7c","Type":"ContainerDied","Data":"c72e000c2d2bb5907065db2ef8a2711d3b0241d92bad9f425d03cec8f7313a42"} Nov 25 09:26:58 crc kubenswrapper[4687]: I1125 09:26:58.296798 4687 scope.go:117] "RemoveContainer" containerID="a39d6635be424b9f4518b2eb71b0c95f2c3cf07a964996420d6fa062da141a40" Nov 25 09:26:58 crc kubenswrapper[4687]: I1125 09:26:58.296892 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-gc2hh" Nov 25 09:26:58 crc kubenswrapper[4687]: I1125 09:26:58.300835 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0f0fb06f-00e1-471a-855b-88f34608ca01","Type":"ContainerStarted","Data":"62e641e439a723cd9909eb6e4b4a415f872d95cee6c95ffd86ea2f401b8ea730"} Nov 25 09:26:58 crc kubenswrapper[4687]: I1125 09:26:58.335328 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gc2hh"] Nov 25 09:26:58 crc kubenswrapper[4687]: I1125 09:26:58.345330 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-gc2hh"] Nov 25 09:26:58 crc kubenswrapper[4687]: I1125 09:26:58.403561 4687 scope.go:117] "RemoveContainer" containerID="80aaef569dd0c18f55a4ce7a932c3f7dcb86ecc1634eff3ec3bb1ebc0e9ed8c4" Nov 25 09:26:58 crc kubenswrapper[4687]: I1125 09:26:58.440054 4687 scope.go:117] "RemoveContainer" containerID="f5558190d967a43fc6eb332592fe126f5ef22e7381700dc3c7f966ddee3d79b5" Nov 25 09:26:59 crc kubenswrapper[4687]: I1125 09:26:59.755939 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b45af2b9-2709-424d-add3-a30bbd8aea7c" path="/var/lib/kubelet/pods/b45af2b9-2709-424d-add3-a30bbd8aea7c/volumes" Nov 25 09:26:59 crc kubenswrapper[4687]: I1125 09:26:59.788619 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7wngm" Nov 25 09:26:59 crc kubenswrapper[4687]: I1125 09:26:59.788678 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7wngm" Nov 25 09:27:00 crc kubenswrapper[4687]: I1125 09:27:00.338621 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"244d6f11-290e-4cbe-95b7-04b7555090a9","Type":"ContainerStarted","Data":"62518dc7c4d8d1abf066e3fe468703ccee0dbecd37811637e0706340d711ec18"} Nov 25 09:27:00 crc kubenswrapper[4687]: I1125 09:27:00.845168 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7wngm" podUID="2409926b-0e0e-4de9-87f9-b61caf4f7c31" containerName="registry-server" probeResult="failure" output=< Nov 25 09:27:00 crc kubenswrapper[4687]: timeout: failed to connect service ":50051" within 1s Nov 25 09:27:00 crc kubenswrapper[4687]: > Nov 25 09:27:01 crc kubenswrapper[4687]: I1125 09:27:01.887263 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:27:01 crc kubenswrapper[4687]: I1125 09:27:01.962843 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-nng4f"] Nov 25 09:27:01 crc kubenswrapper[4687]: I1125 09:27:01.963214 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" podUID="52d224c6-fc25-45ce-bb59-9d91bf05df17" containerName="dnsmasq-dns" containerID="cri-o://3a4b3a62093040a6d62c070b53efca092d4098caa5f02ab9a007960a259e5813" gracePeriod=10 Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.249144 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78c64bc9c5-bjn64"] Nov 25 09:27:02 crc kubenswrapper[4687]: E1125 09:27:02.249540 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b45af2b9-2709-424d-add3-a30bbd8aea7c" containerName="extract-utilities" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 
09:27:02.249555 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="b45af2b9-2709-424d-add3-a30bbd8aea7c" containerName="extract-utilities" Nov 25 09:27:02 crc kubenswrapper[4687]: E1125 09:27:02.249577 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b45af2b9-2709-424d-add3-a30bbd8aea7c" containerName="registry-server" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.249584 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="b45af2b9-2709-424d-add3-a30bbd8aea7c" containerName="registry-server" Nov 25 09:27:02 crc kubenswrapper[4687]: E1125 09:27:02.249616 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b45af2b9-2709-424d-add3-a30bbd8aea7c" containerName="extract-content" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.249622 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="b45af2b9-2709-424d-add3-a30bbd8aea7c" containerName="extract-content" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.249792 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="b45af2b9-2709-424d-add3-a30bbd8aea7c" containerName="registry-server" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.250730 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.264868 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78c64bc9c5-bjn64"] Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.356126 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/68d33f41-95f3-41f3-847a-76b368d367cd-config\") pod \"dnsmasq-dns-78c64bc9c5-bjn64\" (UID: \"68d33f41-95f3-41f3-847a-76b368d367cd\") " pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.356198 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/68d33f41-95f3-41f3-847a-76b368d367cd-openstack-edpm-ipam\") pod \"dnsmasq-dns-78c64bc9c5-bjn64\" (UID: \"68d33f41-95f3-41f3-847a-76b368d367cd\") " pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.356220 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/68d33f41-95f3-41f3-847a-76b368d367cd-dns-swift-storage-0\") pod \"dnsmasq-dns-78c64bc9c5-bjn64\" (UID: \"68d33f41-95f3-41f3-847a-76b368d367cd\") " pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.356337 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/68d33f41-95f3-41f3-847a-76b368d367cd-ovsdbserver-sb\") pod \"dnsmasq-dns-78c64bc9c5-bjn64\" (UID: \"68d33f41-95f3-41f3-847a-76b368d367cd\") " pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.356374 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/68d33f41-95f3-41f3-847a-76b368d367cd-ovsdbserver-nb\") pod \"dnsmasq-dns-78c64bc9c5-bjn64\" (UID: \"68d33f41-95f3-41f3-847a-76b368d367cd\") " pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 
crc kubenswrapper[4687]: I1125 09:27:02.356421 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6f7fr\" (UniqueName: \"kubernetes.io/projected/68d33f41-95f3-41f3-847a-76b368d367cd-kube-api-access-6f7fr\") pod \"dnsmasq-dns-78c64bc9c5-bjn64\" (UID: \"68d33f41-95f3-41f3-847a-76b368d367cd\") " pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.356468 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/68d33f41-95f3-41f3-847a-76b368d367cd-dns-svc\") pod \"dnsmasq-dns-78c64bc9c5-bjn64\" (UID: \"68d33f41-95f3-41f3-847a-76b368d367cd\") " pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.356760 4687 generic.go:334] "Generic (PLEG): container finished" podID="52d224c6-fc25-45ce-bb59-9d91bf05df17" containerID="3a4b3a62093040a6d62c070b53efca092d4098caa5f02ab9a007960a259e5813" exitCode=0 Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.356794 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" event={"ID":"52d224c6-fc25-45ce-bb59-9d91bf05df17","Type":"ContainerDied","Data":"3a4b3a62093040a6d62c070b53efca092d4098caa5f02ab9a007960a259e5813"} Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.458021 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/68d33f41-95f3-41f3-847a-76b368d367cd-config\") pod \"dnsmasq-dns-78c64bc9c5-bjn64\" (UID: \"68d33f41-95f3-41f3-847a-76b368d367cd\") " pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.458460 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/68d33f41-95f3-41f3-847a-76b368d367cd-openstack-edpm-ipam\") pod \"dnsmasq-dns-78c64bc9c5-bjn64\" (UID: \"68d33f41-95f3-41f3-847a-76b368d367cd\") " pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.458489 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/68d33f41-95f3-41f3-847a-76b368d367cd-dns-swift-storage-0\") pod \"dnsmasq-dns-78c64bc9c5-bjn64\" (UID: \"68d33f41-95f3-41f3-847a-76b368d367cd\") " pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.458580 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/68d33f41-95f3-41f3-847a-76b368d367cd-ovsdbserver-sb\") pod \"dnsmasq-dns-78c64bc9c5-bjn64\" (UID: \"68d33f41-95f3-41f3-847a-76b368d367cd\") " pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.458659 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/68d33f41-95f3-41f3-847a-76b368d367cd-ovsdbserver-nb\") pod \"dnsmasq-dns-78c64bc9c5-bjn64\" (UID: \"68d33f41-95f3-41f3-847a-76b368d367cd\") " pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.458723 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6f7fr\" (UniqueName: 
\"kubernetes.io/projected/68d33f41-95f3-41f3-847a-76b368d367cd-kube-api-access-6f7fr\") pod \"dnsmasq-dns-78c64bc9c5-bjn64\" (UID: \"68d33f41-95f3-41f3-847a-76b368d367cd\") " pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.458775 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/68d33f41-95f3-41f3-847a-76b368d367cd-dns-svc\") pod \"dnsmasq-dns-78c64bc9c5-bjn64\" (UID: \"68d33f41-95f3-41f3-847a-76b368d367cd\") " pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.458830 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/68d33f41-95f3-41f3-847a-76b368d367cd-config\") pod \"dnsmasq-dns-78c64bc9c5-bjn64\" (UID: \"68d33f41-95f3-41f3-847a-76b368d367cd\") " pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.459442 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/68d33f41-95f3-41f3-847a-76b368d367cd-dns-svc\") pod \"dnsmasq-dns-78c64bc9c5-bjn64\" (UID: \"68d33f41-95f3-41f3-847a-76b368d367cd\") " pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.459921 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/68d33f41-95f3-41f3-847a-76b368d367cd-ovsdbserver-nb\") pod \"dnsmasq-dns-78c64bc9c5-bjn64\" (UID: \"68d33f41-95f3-41f3-847a-76b368d367cd\") " pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.460039 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/68d33f41-95f3-41f3-847a-76b368d367cd-ovsdbserver-sb\") pod \"dnsmasq-dns-78c64bc9c5-bjn64\" (UID: \"68d33f41-95f3-41f3-847a-76b368d367cd\") " pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.460127 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/68d33f41-95f3-41f3-847a-76b368d367cd-dns-swift-storage-0\") pod \"dnsmasq-dns-78c64bc9c5-bjn64\" (UID: \"68d33f41-95f3-41f3-847a-76b368d367cd\") " pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.460237 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/68d33f41-95f3-41f3-847a-76b368d367cd-openstack-edpm-ipam\") pod \"dnsmasq-dns-78c64bc9c5-bjn64\" (UID: \"68d33f41-95f3-41f3-847a-76b368d367cd\") " pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.477998 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6f7fr\" (UniqueName: \"kubernetes.io/projected/68d33f41-95f3-41f3-847a-76b368d367cd-kube-api-access-6f7fr\") pod \"dnsmasq-dns-78c64bc9c5-bjn64\" (UID: \"68d33f41-95f3-41f3-847a-76b368d367cd\") " pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.606935 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:02 crc kubenswrapper[4687]: I1125 09:27:02.820927 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" podUID="52d224c6-fc25-45ce-bb59-9d91bf05df17" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.195:5353: connect: connection refused" Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.071412 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78c64bc9c5-bjn64"] Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.327190 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.371975 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" event={"ID":"52d224c6-fc25-45ce-bb59-9d91bf05df17","Type":"ContainerDied","Data":"ac53d1d98b7ef2ceebdae38d25cd3973862db2b3491344499de0d9e951664b14"} Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.371989 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-nng4f" Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.372033 4687 scope.go:117] "RemoveContainer" containerID="3a4b3a62093040a6d62c070b53efca092d4098caa5f02ab9a007960a259e5813" Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.375040 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" event={"ID":"68d33f41-95f3-41f3-847a-76b368d367cd","Type":"ContainerStarted","Data":"f3df0b5943a56907c4d5aef8cad73355d89d54ab2b2e160f922c7b2b20029c91"} Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.383266 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-ovsdbserver-sb\") pod \"52d224c6-fc25-45ce-bb59-9d91bf05df17\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.383970 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-ovsdbserver-nb\") pod \"52d224c6-fc25-45ce-bb59-9d91bf05df17\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.384034 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-dns-svc\") pod \"52d224c6-fc25-45ce-bb59-9d91bf05df17\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.384075 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nk89q\" (UniqueName: \"kubernetes.io/projected/52d224c6-fc25-45ce-bb59-9d91bf05df17-kube-api-access-nk89q\") pod \"52d224c6-fc25-45ce-bb59-9d91bf05df17\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.384105 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-dns-swift-storage-0\") pod \"52d224c6-fc25-45ce-bb59-9d91bf05df17\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " Nov 25 09:27:03 crc 
kubenswrapper[4687]: I1125 09:27:03.384210 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-config\") pod \"52d224c6-fc25-45ce-bb59-9d91bf05df17\" (UID: \"52d224c6-fc25-45ce-bb59-9d91bf05df17\") " Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.402527 4687 scope.go:117] "RemoveContainer" containerID="927a9a3b8d667f984bae7f7cb2cd2dba24186e188846298950bfafdc783bd64f" Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.402762 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52d224c6-fc25-45ce-bb59-9d91bf05df17-kube-api-access-nk89q" (OuterVolumeSpecName: "kube-api-access-nk89q") pod "52d224c6-fc25-45ce-bb59-9d91bf05df17" (UID: "52d224c6-fc25-45ce-bb59-9d91bf05df17"). InnerVolumeSpecName "kube-api-access-nk89q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.485639 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "52d224c6-fc25-45ce-bb59-9d91bf05df17" (UID: "52d224c6-fc25-45ce-bb59-9d91bf05df17"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.487474 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nk89q\" (UniqueName: \"kubernetes.io/projected/52d224c6-fc25-45ce-bb59-9d91bf05df17-kube-api-access-nk89q\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.487523 4687 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.499760 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "52d224c6-fc25-45ce-bb59-9d91bf05df17" (UID: "52d224c6-fc25-45ce-bb59-9d91bf05df17"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.503806 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "52d224c6-fc25-45ce-bb59-9d91bf05df17" (UID: "52d224c6-fc25-45ce-bb59-9d91bf05df17"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.506669 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-config" (OuterVolumeSpecName: "config") pod "52d224c6-fc25-45ce-bb59-9d91bf05df17" (UID: "52d224c6-fc25-45ce-bb59-9d91bf05df17"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.507113 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "52d224c6-fc25-45ce-bb59-9d91bf05df17" (UID: "52d224c6-fc25-45ce-bb59-9d91bf05df17"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.589754 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.590156 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.590175 4687 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.590191 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52d224c6-fc25-45ce-bb59-9d91bf05df17-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.798529 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-nng4f"] Nov 25 09:27:03 crc kubenswrapper[4687]: I1125 09:27:03.806535 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-nng4f"] Nov 25 09:27:04 crc kubenswrapper[4687]: I1125 09:27:04.383614 4687 generic.go:334] "Generic (PLEG): container finished" podID="68d33f41-95f3-41f3-847a-76b368d367cd" containerID="9ae1cf18c7db110735f56f6cf08b9ec61f0bf217fd3dde5be85c46cc6e2de597" exitCode=0 Nov 25 09:27:04 crc kubenswrapper[4687]: I1125 09:27:04.383680 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" event={"ID":"68d33f41-95f3-41f3-847a-76b368d367cd","Type":"ContainerDied","Data":"9ae1cf18c7db110735f56f6cf08b9ec61f0bf217fd3dde5be85c46cc6e2de597"} Nov 25 09:27:05 crc kubenswrapper[4687]: I1125 09:27:05.397664 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" event={"ID":"68d33f41-95f3-41f3-847a-76b368d367cd","Type":"ContainerStarted","Data":"95416cc38b5b778ea1bc6efe666db8a30ef71db32fe69128c4b9dbca2a30fc02"} Nov 25 09:27:05 crc kubenswrapper[4687]: I1125 09:27:05.399877 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:05 crc kubenswrapper[4687]: I1125 09:27:05.760676 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52d224c6-fc25-45ce-bb59-9d91bf05df17" path="/var/lib/kubelet/pods/52d224c6-fc25-45ce-bb59-9d91bf05df17/volumes" Nov 25 09:27:10 crc kubenswrapper[4687]: I1125 09:27:10.846048 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7wngm" podUID="2409926b-0e0e-4de9-87f9-b61caf4f7c31" containerName="registry-server" probeResult="failure" output=< Nov 25 09:27:10 crc kubenswrapper[4687]: timeout: failed to connect service ":50051" within 1s Nov 
25 09:27:10 crc kubenswrapper[4687]: > Nov 25 09:27:12 crc kubenswrapper[4687]: I1125 09:27:12.609215 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" Nov 25 09:27:12 crc kubenswrapper[4687]: I1125 09:27:12.633634 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-78c64bc9c5-bjn64" podStartSLOduration=10.633608945 podStartE2EDuration="10.633608945s" podCreationTimestamp="2025-11-25 09:27:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:27:05.428612352 +0000 UTC m=+1420.482252080" watchObservedRunningTime="2025-11-25 09:27:12.633608945 +0000 UTC m=+1427.687248663" Nov 25 09:27:12 crc kubenswrapper[4687]: I1125 09:27:12.678833 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-6bz7h"] Nov 25 09:27:12 crc kubenswrapper[4687]: I1125 09:27:12.679460 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-d558885bc-6bz7h" podUID="b864c704-e14e-4b2c-9da3-e93feb535259" containerName="dnsmasq-dns" containerID="cri-o://f1265df310ed4bf60d150ea56f50e9b96e9205ff332736b20cf4078997f754cc" gracePeriod=10 Nov 25 09:27:13 crc kubenswrapper[4687]: I1125 09:27:13.485852 4687 generic.go:334] "Generic (PLEG): container finished" podID="b864c704-e14e-4b2c-9da3-e93feb535259" containerID="f1265df310ed4bf60d150ea56f50e9b96e9205ff332736b20cf4078997f754cc" exitCode=0 Nov 25 09:27:13 crc kubenswrapper[4687]: I1125 09:27:13.486197 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-6bz7h" event={"ID":"b864c704-e14e-4b2c-9da3-e93feb535259","Type":"ContainerDied","Data":"f1265df310ed4bf60d150ea56f50e9b96e9205ff332736b20cf4078997f754cc"} Nov 25 09:27:13 crc kubenswrapper[4687]: I1125 09:27:13.774579 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:27:13 crc kubenswrapper[4687]: I1125 09:27:13.952057 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-config\") pod \"b864c704-e14e-4b2c-9da3-e93feb535259\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " Nov 25 09:27:13 crc kubenswrapper[4687]: I1125 09:27:13.952146 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-ovsdbserver-sb\") pod \"b864c704-e14e-4b2c-9da3-e93feb535259\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " Nov 25 09:27:13 crc kubenswrapper[4687]: I1125 09:27:13.952254 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-dns-swift-storage-0\") pod \"b864c704-e14e-4b2c-9da3-e93feb535259\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " Nov 25 09:27:13 crc kubenswrapper[4687]: I1125 09:27:13.952360 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-openstack-edpm-ipam\") pod \"b864c704-e14e-4b2c-9da3-e93feb535259\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " Nov 25 09:27:13 crc kubenswrapper[4687]: I1125 09:27:13.952381 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-ovsdbserver-nb\") pod \"b864c704-e14e-4b2c-9da3-e93feb535259\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " Nov 25 09:27:13 crc kubenswrapper[4687]: I1125 09:27:13.952414 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p2mtt\" (UniqueName: \"kubernetes.io/projected/b864c704-e14e-4b2c-9da3-e93feb535259-kube-api-access-p2mtt\") pod \"b864c704-e14e-4b2c-9da3-e93feb535259\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " Nov 25 09:27:13 crc kubenswrapper[4687]: I1125 09:27:13.952469 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-dns-svc\") pod \"b864c704-e14e-4b2c-9da3-e93feb535259\" (UID: \"b864c704-e14e-4b2c-9da3-e93feb535259\") " Nov 25 09:27:13 crc kubenswrapper[4687]: I1125 09:27:13.960562 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b864c704-e14e-4b2c-9da3-e93feb535259-kube-api-access-p2mtt" (OuterVolumeSpecName: "kube-api-access-p2mtt") pod "b864c704-e14e-4b2c-9da3-e93feb535259" (UID: "b864c704-e14e-4b2c-9da3-e93feb535259"). InnerVolumeSpecName "kube-api-access-p2mtt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:27:14 crc kubenswrapper[4687]: I1125 09:27:14.017249 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "b864c704-e14e-4b2c-9da3-e93feb535259" (UID: "b864c704-e14e-4b2c-9da3-e93feb535259"). InnerVolumeSpecName "openstack-edpm-ipam". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:27:14 crc kubenswrapper[4687]: I1125 09:27:14.018348 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-config" (OuterVolumeSpecName: "config") pod "b864c704-e14e-4b2c-9da3-e93feb535259" (UID: "b864c704-e14e-4b2c-9da3-e93feb535259"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:27:14 crc kubenswrapper[4687]: I1125 09:27:14.022110 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b864c704-e14e-4b2c-9da3-e93feb535259" (UID: "b864c704-e14e-4b2c-9da3-e93feb535259"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:27:14 crc kubenswrapper[4687]: I1125 09:27:14.029446 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b864c704-e14e-4b2c-9da3-e93feb535259" (UID: "b864c704-e14e-4b2c-9da3-e93feb535259"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:27:14 crc kubenswrapper[4687]: I1125 09:27:14.042974 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b864c704-e14e-4b2c-9da3-e93feb535259" (UID: "b864c704-e14e-4b2c-9da3-e93feb535259"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:27:14 crc kubenswrapper[4687]: I1125 09:27:14.046829 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b864c704-e14e-4b2c-9da3-e93feb535259" (UID: "b864c704-e14e-4b2c-9da3-e93feb535259"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:27:14 crc kubenswrapper[4687]: I1125 09:27:14.057725 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:14 crc kubenswrapper[4687]: I1125 09:27:14.057767 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p2mtt\" (UniqueName: \"kubernetes.io/projected/b864c704-e14e-4b2c-9da3-e93feb535259-kube-api-access-p2mtt\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:14 crc kubenswrapper[4687]: I1125 09:27:14.057785 4687 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:14 crc kubenswrapper[4687]: I1125 09:27:14.057797 4687 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:14 crc kubenswrapper[4687]: I1125 09:27:14.057807 4687 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:14 crc kubenswrapper[4687]: I1125 09:27:14.057818 4687 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:14 crc kubenswrapper[4687]: I1125 09:27:14.057829 4687 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b864c704-e14e-4b2c-9da3-e93feb535259-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:14 crc kubenswrapper[4687]: I1125 09:27:14.495274 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-6bz7h" event={"ID":"b864c704-e14e-4b2c-9da3-e93feb535259","Type":"ContainerDied","Data":"5272315b1d7738b6e496b89ed45cd67adce8a65f345ea3bea5865359c55abaad"} Nov 25 09:27:14 crc kubenswrapper[4687]: I1125 09:27:14.495326 4687 scope.go:117] "RemoveContainer" containerID="f1265df310ed4bf60d150ea56f50e9b96e9205ff332736b20cf4078997f754cc" Nov 25 09:27:14 crc kubenswrapper[4687]: I1125 09:27:14.495347 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-6bz7h" Nov 25 09:27:14 crc kubenswrapper[4687]: I1125 09:27:14.520728 4687 scope.go:117] "RemoveContainer" containerID="72abcfc8ad8c4ca4f7fae293a7355a64db88d9913a86d25fc0a2b49505b123b5" Nov 25 09:27:14 crc kubenswrapper[4687]: I1125 09:27:14.525044 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-6bz7h"] Nov 25 09:27:14 crc kubenswrapper[4687]: I1125 09:27:14.533161 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-6bz7h"] Nov 25 09:27:15 crc kubenswrapper[4687]: I1125 09:27:15.748136 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b864c704-e14e-4b2c-9da3-e93feb535259" path="/var/lib/kubelet/pods/b864c704-e14e-4b2c-9da3-e93feb535259/volumes" Nov 25 09:27:19 crc kubenswrapper[4687]: I1125 09:27:19.844084 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7wngm" Nov 25 09:27:19 crc kubenswrapper[4687]: I1125 09:27:19.893592 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7wngm" Nov 25 09:27:20 crc kubenswrapper[4687]: I1125 09:27:20.093839 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7wngm"] Nov 25 09:27:21 crc kubenswrapper[4687]: I1125 09:27:21.564383 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7wngm" podUID="2409926b-0e0e-4de9-87f9-b61caf4f7c31" containerName="registry-server" containerID="cri-o://f54fb4972ad6a56153788dcddd9eac5e1054c767075f4b9a576b4a0cfb742a9b" gracePeriod=2 Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.600382 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7wngm" Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.600429 4687 generic.go:334] "Generic (PLEG): container finished" podID="2409926b-0e0e-4de9-87f9-b61caf4f7c31" containerID="f54fb4972ad6a56153788dcddd9eac5e1054c767075f4b9a576b4a0cfb742a9b" exitCode=0 Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.600465 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7wngm" event={"ID":"2409926b-0e0e-4de9-87f9-b61caf4f7c31","Type":"ContainerDied","Data":"f54fb4972ad6a56153788dcddd9eac5e1054c767075f4b9a576b4a0cfb742a9b"} Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.601231 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7wngm" event={"ID":"2409926b-0e0e-4de9-87f9-b61caf4f7c31","Type":"ContainerDied","Data":"38fe1164305419b014df447a79e4cda675aefa3bc6f62ae85511f91713dd2f50"} Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.601264 4687 scope.go:117] "RemoveContainer" containerID="f54fb4972ad6a56153788dcddd9eac5e1054c767075f4b9a576b4a0cfb742a9b" Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.622669 4687 scope.go:117] "RemoveContainer" containerID="ace083961e43178241bfbb2f74fb84f84c05d2ed804d9dd58c3f53bde66aab75" Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.647191 4687 scope.go:117] "RemoveContainer" containerID="07954621bed3c080329d54bea24740eed03ca600a79fd6a38521171afb83fdf2" Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.697815 4687 scope.go:117] "RemoveContainer" containerID="f54fb4972ad6a56153788dcddd9eac5e1054c767075f4b9a576b4a0cfb742a9b" Nov 25 09:27:22 crc kubenswrapper[4687]: E1125 09:27:22.698836 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f54fb4972ad6a56153788dcddd9eac5e1054c767075f4b9a576b4a0cfb742a9b\": container with ID starting with f54fb4972ad6a56153788dcddd9eac5e1054c767075f4b9a576b4a0cfb742a9b not found: ID does not exist" containerID="f54fb4972ad6a56153788dcddd9eac5e1054c767075f4b9a576b4a0cfb742a9b" Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.698880 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f54fb4972ad6a56153788dcddd9eac5e1054c767075f4b9a576b4a0cfb742a9b"} err="failed to get container status \"f54fb4972ad6a56153788dcddd9eac5e1054c767075f4b9a576b4a0cfb742a9b\": rpc error: code = NotFound desc = could not find container \"f54fb4972ad6a56153788dcddd9eac5e1054c767075f4b9a576b4a0cfb742a9b\": container with ID starting with f54fb4972ad6a56153788dcddd9eac5e1054c767075f4b9a576b4a0cfb742a9b not found: ID does not exist" Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.698907 4687 scope.go:117] "RemoveContainer" containerID="ace083961e43178241bfbb2f74fb84f84c05d2ed804d9dd58c3f53bde66aab75" Nov 25 09:27:22 crc kubenswrapper[4687]: E1125 09:27:22.699834 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ace083961e43178241bfbb2f74fb84f84c05d2ed804d9dd58c3f53bde66aab75\": container with ID starting with ace083961e43178241bfbb2f74fb84f84c05d2ed804d9dd58c3f53bde66aab75 not found: ID does not exist" containerID="ace083961e43178241bfbb2f74fb84f84c05d2ed804d9dd58c3f53bde66aab75" Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.699863 4687 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"ace083961e43178241bfbb2f74fb84f84c05d2ed804d9dd58c3f53bde66aab75"} err="failed to get container status \"ace083961e43178241bfbb2f74fb84f84c05d2ed804d9dd58c3f53bde66aab75\": rpc error: code = NotFound desc = could not find container \"ace083961e43178241bfbb2f74fb84f84c05d2ed804d9dd58c3f53bde66aab75\": container with ID starting with ace083961e43178241bfbb2f74fb84f84c05d2ed804d9dd58c3f53bde66aab75 not found: ID does not exist" Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.699882 4687 scope.go:117] "RemoveContainer" containerID="07954621bed3c080329d54bea24740eed03ca600a79fd6a38521171afb83fdf2" Nov 25 09:27:22 crc kubenswrapper[4687]: E1125 09:27:22.700123 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07954621bed3c080329d54bea24740eed03ca600a79fd6a38521171afb83fdf2\": container with ID starting with 07954621bed3c080329d54bea24740eed03ca600a79fd6a38521171afb83fdf2 not found: ID does not exist" containerID="07954621bed3c080329d54bea24740eed03ca600a79fd6a38521171afb83fdf2" Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.700149 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07954621bed3c080329d54bea24740eed03ca600a79fd6a38521171afb83fdf2"} err="failed to get container status \"07954621bed3c080329d54bea24740eed03ca600a79fd6a38521171afb83fdf2\": rpc error: code = NotFound desc = could not find container \"07954621bed3c080329d54bea24740eed03ca600a79fd6a38521171afb83fdf2\": container with ID starting with 07954621bed3c080329d54bea24740eed03ca600a79fd6a38521171afb83fdf2 not found: ID does not exist" Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.722535 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t8jl8\" (UniqueName: \"kubernetes.io/projected/2409926b-0e0e-4de9-87f9-b61caf4f7c31-kube-api-access-t8jl8\") pod \"2409926b-0e0e-4de9-87f9-b61caf4f7c31\" (UID: \"2409926b-0e0e-4de9-87f9-b61caf4f7c31\") " Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.722692 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2409926b-0e0e-4de9-87f9-b61caf4f7c31-catalog-content\") pod \"2409926b-0e0e-4de9-87f9-b61caf4f7c31\" (UID: \"2409926b-0e0e-4de9-87f9-b61caf4f7c31\") " Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.723651 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2409926b-0e0e-4de9-87f9-b61caf4f7c31-utilities\") pod \"2409926b-0e0e-4de9-87f9-b61caf4f7c31\" (UID: \"2409926b-0e0e-4de9-87f9-b61caf4f7c31\") " Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.724374 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2409926b-0e0e-4de9-87f9-b61caf4f7c31-utilities" (OuterVolumeSpecName: "utilities") pod "2409926b-0e0e-4de9-87f9-b61caf4f7c31" (UID: "2409926b-0e0e-4de9-87f9-b61caf4f7c31"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.724595 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2409926b-0e0e-4de9-87f9-b61caf4f7c31-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.729793 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2409926b-0e0e-4de9-87f9-b61caf4f7c31-kube-api-access-t8jl8" (OuterVolumeSpecName: "kube-api-access-t8jl8") pod "2409926b-0e0e-4de9-87f9-b61caf4f7c31" (UID: "2409926b-0e0e-4de9-87f9-b61caf4f7c31"). InnerVolumeSpecName "kube-api-access-t8jl8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.806294 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2409926b-0e0e-4de9-87f9-b61caf4f7c31-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2409926b-0e0e-4de9-87f9-b61caf4f7c31" (UID: "2409926b-0e0e-4de9-87f9-b61caf4f7c31"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.827195 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2409926b-0e0e-4de9-87f9-b61caf4f7c31-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:22 crc kubenswrapper[4687]: I1125 09:27:22.827407 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t8jl8\" (UniqueName: \"kubernetes.io/projected/2409926b-0e0e-4de9-87f9-b61caf4f7c31-kube-api-access-t8jl8\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:23 crc kubenswrapper[4687]: I1125 09:27:23.612623 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7wngm" Nov 25 09:27:23 crc kubenswrapper[4687]: I1125 09:27:23.651472 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7wngm"] Nov 25 09:27:23 crc kubenswrapper[4687]: I1125 09:27:23.659782 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7wngm"] Nov 25 09:27:23 crc kubenswrapper[4687]: I1125 09:27:23.749238 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2409926b-0e0e-4de9-87f9-b61caf4f7c31" path="/var/lib/kubelet/pods/2409926b-0e0e-4de9-87f9-b61caf4f7c31/volumes" Nov 25 09:27:23 crc kubenswrapper[4687]: I1125 09:27:23.844587 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:27:23 crc kubenswrapper[4687]: I1125 09:27:23.844660 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:27:25 crc kubenswrapper[4687]: I1125 09:27:25.994250 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk"] Nov 25 09:27:25 crc kubenswrapper[4687]: E1125 09:27:25.995205 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b864c704-e14e-4b2c-9da3-e93feb535259" containerName="dnsmasq-dns" Nov 25 09:27:25 crc kubenswrapper[4687]: I1125 09:27:25.995229 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="b864c704-e14e-4b2c-9da3-e93feb535259" containerName="dnsmasq-dns" Nov 25 09:27:25 crc kubenswrapper[4687]: E1125 09:27:25.995253 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52d224c6-fc25-45ce-bb59-9d91bf05df17" containerName="init" Nov 25 09:27:25 crc kubenswrapper[4687]: I1125 09:27:25.995264 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="52d224c6-fc25-45ce-bb59-9d91bf05df17" containerName="init" Nov 25 09:27:25 crc kubenswrapper[4687]: E1125 09:27:25.995290 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2409926b-0e0e-4de9-87f9-b61caf4f7c31" containerName="extract-utilities" Nov 25 09:27:25 crc kubenswrapper[4687]: I1125 09:27:25.995302 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="2409926b-0e0e-4de9-87f9-b61caf4f7c31" containerName="extract-utilities" Nov 25 09:27:25 crc kubenswrapper[4687]: E1125 09:27:25.995326 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2409926b-0e0e-4de9-87f9-b61caf4f7c31" containerName="extract-content" Nov 25 09:27:25 crc kubenswrapper[4687]: I1125 09:27:25.995337 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="2409926b-0e0e-4de9-87f9-b61caf4f7c31" containerName="extract-content" Nov 25 09:27:25 crc kubenswrapper[4687]: E1125 09:27:25.995356 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b864c704-e14e-4b2c-9da3-e93feb535259" containerName="init" Nov 25 09:27:25 crc kubenswrapper[4687]: I1125 09:27:25.995367 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="b864c704-e14e-4b2c-9da3-e93feb535259" containerName="init" Nov 25 09:27:25 crc 
kubenswrapper[4687]: E1125 09:27:25.995404 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52d224c6-fc25-45ce-bb59-9d91bf05df17" containerName="dnsmasq-dns" Nov 25 09:27:25 crc kubenswrapper[4687]: I1125 09:27:25.995416 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="52d224c6-fc25-45ce-bb59-9d91bf05df17" containerName="dnsmasq-dns" Nov 25 09:27:25 crc kubenswrapper[4687]: E1125 09:27:25.995452 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2409926b-0e0e-4de9-87f9-b61caf4f7c31" containerName="registry-server" Nov 25 09:27:25 crc kubenswrapper[4687]: I1125 09:27:25.995462 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="2409926b-0e0e-4de9-87f9-b61caf4f7c31" containerName="registry-server" Nov 25 09:27:25 crc kubenswrapper[4687]: I1125 09:27:25.995775 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="b864c704-e14e-4b2c-9da3-e93feb535259" containerName="dnsmasq-dns" Nov 25 09:27:25 crc kubenswrapper[4687]: I1125 09:27:25.995805 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="2409926b-0e0e-4de9-87f9-b61caf4f7c31" containerName="registry-server" Nov 25 09:27:25 crc kubenswrapper[4687]: I1125 09:27:25.995849 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="52d224c6-fc25-45ce-bb59-9d91bf05df17" containerName="dnsmasq-dns" Nov 25 09:27:25 crc kubenswrapper[4687]: I1125 09:27:25.997231 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk" Nov 25 09:27:26 crc kubenswrapper[4687]: I1125 09:27:26.000246 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 09:27:26 crc kubenswrapper[4687]: I1125 09:27:26.000424 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-44nct" Nov 25 09:27:26 crc kubenswrapper[4687]: I1125 09:27:26.000445 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 09:27:26 crc kubenswrapper[4687]: I1125 09:27:26.003249 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk"] Nov 25 09:27:26 crc kubenswrapper[4687]: I1125 09:27:26.007230 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 09:27:26 crc kubenswrapper[4687]: I1125 09:27:26.086452 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/59cc9836-1eba-484b-9c23-78e3368be44c-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk\" (UID: \"59cc9836-1eba-484b-9c23-78e3368be44c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk" Nov 25 09:27:26 crc kubenswrapper[4687]: I1125 09:27:26.086692 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59cc9836-1eba-484b-9c23-78e3368be44c-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk\" (UID: \"59cc9836-1eba-484b-9c23-78e3368be44c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk" Nov 25 09:27:26 crc kubenswrapper[4687]: I1125 09:27:26.086780 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/59cc9836-1eba-484b-9c23-78e3368be44c-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk\" (UID: \"59cc9836-1eba-484b-9c23-78e3368be44c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk" Nov 25 09:27:26 crc kubenswrapper[4687]: I1125 09:27:26.086877 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2rbm\" (UniqueName: \"kubernetes.io/projected/59cc9836-1eba-484b-9c23-78e3368be44c-kube-api-access-q2rbm\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk\" (UID: \"59cc9836-1eba-484b-9c23-78e3368be44c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk" Nov 25 09:27:26 crc kubenswrapper[4687]: I1125 09:27:26.188316 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59cc9836-1eba-484b-9c23-78e3368be44c-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk\" (UID: \"59cc9836-1eba-484b-9c23-78e3368be44c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk" Nov 25 09:27:26 crc kubenswrapper[4687]: I1125 09:27:26.189171 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/59cc9836-1eba-484b-9c23-78e3368be44c-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk\" (UID: \"59cc9836-1eba-484b-9c23-78e3368be44c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk" Nov 25 09:27:26 crc kubenswrapper[4687]: I1125 09:27:26.189256 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2rbm\" (UniqueName: \"kubernetes.io/projected/59cc9836-1eba-484b-9c23-78e3368be44c-kube-api-access-q2rbm\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk\" (UID: \"59cc9836-1eba-484b-9c23-78e3368be44c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk" Nov 25 09:27:26 crc kubenswrapper[4687]: I1125 09:27:26.189296 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/59cc9836-1eba-484b-9c23-78e3368be44c-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk\" (UID: \"59cc9836-1eba-484b-9c23-78e3368be44c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk" Nov 25 09:27:26 crc kubenswrapper[4687]: I1125 09:27:26.194656 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59cc9836-1eba-484b-9c23-78e3368be44c-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk\" (UID: \"59cc9836-1eba-484b-9c23-78e3368be44c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk" Nov 25 09:27:26 crc kubenswrapper[4687]: I1125 09:27:26.195086 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/59cc9836-1eba-484b-9c23-78e3368be44c-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk\" (UID: \"59cc9836-1eba-484b-9c23-78e3368be44c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk" Nov 25 09:27:26 crc kubenswrapper[4687]: I1125 09:27:26.203541 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" 
(UniqueName: \"kubernetes.io/secret/59cc9836-1eba-484b-9c23-78e3368be44c-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk\" (UID: \"59cc9836-1eba-484b-9c23-78e3368be44c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk" Nov 25 09:27:26 crc kubenswrapper[4687]: I1125 09:27:26.210578 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2rbm\" (UniqueName: \"kubernetes.io/projected/59cc9836-1eba-484b-9c23-78e3368be44c-kube-api-access-q2rbm\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk\" (UID: \"59cc9836-1eba-484b-9c23-78e3368be44c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk" Nov 25 09:27:26 crc kubenswrapper[4687]: I1125 09:27:26.323646 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk" Nov 25 09:27:26 crc kubenswrapper[4687]: I1125 09:27:26.891923 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk"] Nov 25 09:27:26 crc kubenswrapper[4687]: W1125 09:27:26.895170 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod59cc9836_1eba_484b_9c23_78e3368be44c.slice/crio-91748a0395aa721d4f1c5b417692b7ca499cf26eb35f43239f6085e2b866a25a WatchSource:0}: Error finding container 91748a0395aa721d4f1c5b417692b7ca499cf26eb35f43239f6085e2b866a25a: Status 404 returned error can't find the container with id 91748a0395aa721d4f1c5b417692b7ca499cf26eb35f43239f6085e2b866a25a Nov 25 09:27:27 crc kubenswrapper[4687]: I1125 09:27:27.648345 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk" event={"ID":"59cc9836-1eba-484b-9c23-78e3368be44c","Type":"ContainerStarted","Data":"91748a0395aa721d4f1c5b417692b7ca499cf26eb35f43239f6085e2b866a25a"} Nov 25 09:27:31 crc kubenswrapper[4687]: I1125 09:27:31.699091 4687 generic.go:334] "Generic (PLEG): container finished" podID="0f0fb06f-00e1-471a-855b-88f34608ca01" containerID="62e641e439a723cd9909eb6e4b4a415f872d95cee6c95ffd86ea2f401b8ea730" exitCode=0 Nov 25 09:27:31 crc kubenswrapper[4687]: I1125 09:27:31.699472 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0f0fb06f-00e1-471a-855b-88f34608ca01","Type":"ContainerDied","Data":"62e641e439a723cd9909eb6e4b4a415f872d95cee6c95ffd86ea2f401b8ea730"} Nov 25 09:27:32 crc kubenswrapper[4687]: I1125 09:27:32.712157 4687 generic.go:334] "Generic (PLEG): container finished" podID="244d6f11-290e-4cbe-95b7-04b7555090a9" containerID="62518dc7c4d8d1abf066e3fe468703ccee0dbecd37811637e0706340d711ec18" exitCode=0 Nov 25 09:27:32 crc kubenswrapper[4687]: I1125 09:27:32.712255 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"244d6f11-290e-4cbe-95b7-04b7555090a9","Type":"ContainerDied","Data":"62518dc7c4d8d1abf066e3fe468703ccee0dbecd37811637e0706340d711ec18"} Nov 25 09:27:35 crc kubenswrapper[4687]: I1125 09:27:35.746861 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"244d6f11-290e-4cbe-95b7-04b7555090a9","Type":"ContainerStarted","Data":"4d8ca6ab6bff467e1cd525d334e30fa1863e630d197535117a749c5a19b9e553"} Nov 25 09:27:35 crc kubenswrapper[4687]: I1125 09:27:35.747423 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk" event={"ID":"59cc9836-1eba-484b-9c23-78e3368be44c","Type":"ContainerStarted","Data":"01f24d3271b92e1b4d423650ebf8391e56bbb1472594e0ea53124897a8679cac"} Nov 25 09:27:35 crc kubenswrapper[4687]: I1125 09:27:35.748265 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 25 09:27:35 crc kubenswrapper[4687]: I1125 09:27:35.750159 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0f0fb06f-00e1-471a-855b-88f34608ca01","Type":"ContainerStarted","Data":"c4c9946c67cd70eef0b90a1a5cb997b8703e40068faa004e450dc9ac09858e89"} Nov 25 09:27:35 crc kubenswrapper[4687]: I1125 09:27:35.750962 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:27:35 crc kubenswrapper[4687]: I1125 09:27:35.786235 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=39.786216237 podStartE2EDuration="39.786216237s" podCreationTimestamp="2025-11-25 09:26:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:27:35.781753536 +0000 UTC m=+1450.835393264" watchObservedRunningTime="2025-11-25 09:27:35.786216237 +0000 UTC m=+1450.839855955" Nov 25 09:27:35 crc kubenswrapper[4687]: I1125 09:27:35.810805 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk" podStartSLOduration=2.295567122 podStartE2EDuration="10.810784254s" podCreationTimestamp="2025-11-25 09:27:25 +0000 UTC" firstStartedPulling="2025-11-25 09:27:26.897275094 +0000 UTC m=+1441.950914812" lastFinishedPulling="2025-11-25 09:27:35.412492226 +0000 UTC m=+1450.466131944" observedRunningTime="2025-11-25 09:27:35.799340704 +0000 UTC m=+1450.852980442" watchObservedRunningTime="2025-11-25 09:27:35.810784254 +0000 UTC m=+1450.864423972" Nov 25 09:27:35 crc kubenswrapper[4687]: I1125 09:27:35.825546 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=40.825524945 podStartE2EDuration="40.825524945s" podCreationTimestamp="2025-11-25 09:26:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:27:35.822904324 +0000 UTC m=+1450.876544042" watchObservedRunningTime="2025-11-25 09:27:35.825524945 +0000 UTC m=+1450.879164663" Nov 25 09:27:45 crc kubenswrapper[4687]: I1125 09:27:45.704519 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="0f0fb06f-00e1-471a-855b-88f34608ca01" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.205:5671: connect: connection refused" Nov 25 09:27:46 crc kubenswrapper[4687]: I1125 09:27:46.819863 4687 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="244d6f11-290e-4cbe-95b7-04b7555090a9" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.206:5671: connect: connection refused" Nov 25 09:27:49 crc kubenswrapper[4687]: I1125 09:27:49.903306 4687 generic.go:334] "Generic (PLEG): container finished" podID="59cc9836-1eba-484b-9c23-78e3368be44c" containerID="01f24d3271b92e1b4d423650ebf8391e56bbb1472594e0ea53124897a8679cac" exitCode=0 Nov 25 09:27:49 crc 
kubenswrapper[4687]: I1125 09:27:49.903394 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk" event={"ID":"59cc9836-1eba-484b-9c23-78e3368be44c","Type":"ContainerDied","Data":"01f24d3271b92e1b4d423650ebf8391e56bbb1472594e0ea53124897a8679cac"} Nov 25 09:27:51 crc kubenswrapper[4687]: I1125 09:27:51.358827 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk" Nov 25 09:27:51 crc kubenswrapper[4687]: I1125 09:27:51.461326 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59cc9836-1eba-484b-9c23-78e3368be44c-repo-setup-combined-ca-bundle\") pod \"59cc9836-1eba-484b-9c23-78e3368be44c\" (UID: \"59cc9836-1eba-484b-9c23-78e3368be44c\") " Nov 25 09:27:51 crc kubenswrapper[4687]: I1125 09:27:51.461524 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/59cc9836-1eba-484b-9c23-78e3368be44c-ssh-key\") pod \"59cc9836-1eba-484b-9c23-78e3368be44c\" (UID: \"59cc9836-1eba-484b-9c23-78e3368be44c\") " Nov 25 09:27:51 crc kubenswrapper[4687]: I1125 09:27:51.461585 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/59cc9836-1eba-484b-9c23-78e3368be44c-inventory\") pod \"59cc9836-1eba-484b-9c23-78e3368be44c\" (UID: \"59cc9836-1eba-484b-9c23-78e3368be44c\") " Nov 25 09:27:51 crc kubenswrapper[4687]: I1125 09:27:51.461664 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q2rbm\" (UniqueName: \"kubernetes.io/projected/59cc9836-1eba-484b-9c23-78e3368be44c-kube-api-access-q2rbm\") pod \"59cc9836-1eba-484b-9c23-78e3368be44c\" (UID: \"59cc9836-1eba-484b-9c23-78e3368be44c\") " Nov 25 09:27:51 crc kubenswrapper[4687]: I1125 09:27:51.466776 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59cc9836-1eba-484b-9c23-78e3368be44c-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "59cc9836-1eba-484b-9c23-78e3368be44c" (UID: "59cc9836-1eba-484b-9c23-78e3368be44c"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:27:51 crc kubenswrapper[4687]: I1125 09:27:51.472259 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59cc9836-1eba-484b-9c23-78e3368be44c-kube-api-access-q2rbm" (OuterVolumeSpecName: "kube-api-access-q2rbm") pod "59cc9836-1eba-484b-9c23-78e3368be44c" (UID: "59cc9836-1eba-484b-9c23-78e3368be44c"). InnerVolumeSpecName "kube-api-access-q2rbm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:27:51 crc kubenswrapper[4687]: I1125 09:27:51.488584 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59cc9836-1eba-484b-9c23-78e3368be44c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "59cc9836-1eba-484b-9c23-78e3368be44c" (UID: "59cc9836-1eba-484b-9c23-78e3368be44c"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:27:51 crc kubenswrapper[4687]: I1125 09:27:51.494241 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59cc9836-1eba-484b-9c23-78e3368be44c-inventory" (OuterVolumeSpecName: "inventory") pod "59cc9836-1eba-484b-9c23-78e3368be44c" (UID: "59cc9836-1eba-484b-9c23-78e3368be44c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:27:51 crc kubenswrapper[4687]: I1125 09:27:51.563249 4687 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/59cc9836-1eba-484b-9c23-78e3368be44c-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:51 crc kubenswrapper[4687]: I1125 09:27:51.563291 4687 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/59cc9836-1eba-484b-9c23-78e3368be44c-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:51 crc kubenswrapper[4687]: I1125 09:27:51.563305 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q2rbm\" (UniqueName: \"kubernetes.io/projected/59cc9836-1eba-484b-9c23-78e3368be44c-kube-api-access-q2rbm\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:51 crc kubenswrapper[4687]: I1125 09:27:51.563318 4687 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59cc9836-1eba-484b-9c23-78e3368be44c-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:51 crc kubenswrapper[4687]: I1125 09:27:51.950733 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk" event={"ID":"59cc9836-1eba-484b-9c23-78e3368be44c","Type":"ContainerDied","Data":"91748a0395aa721d4f1c5b417692b7ca499cf26eb35f43239f6085e2b866a25a"} Nov 25 09:27:51 crc kubenswrapper[4687]: I1125 09:27:51.950785 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="91748a0395aa721d4f1c5b417692b7ca499cf26eb35f43239f6085e2b866a25a" Nov 25 09:27:51 crc kubenswrapper[4687]: I1125 09:27:51.950784 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk" Nov 25 09:27:52 crc kubenswrapper[4687]: I1125 09:27:52.020647 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-xp2dr"] Nov 25 09:27:52 crc kubenswrapper[4687]: E1125 09:27:52.021414 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59cc9836-1eba-484b-9c23-78e3368be44c" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 09:27:52 crc kubenswrapper[4687]: I1125 09:27:52.021439 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="59cc9836-1eba-484b-9c23-78e3368be44c" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 09:27:52 crc kubenswrapper[4687]: I1125 09:27:52.021729 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="59cc9836-1eba-484b-9c23-78e3368be44c" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 09:27:52 crc kubenswrapper[4687]: I1125 09:27:52.022711 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xp2dr" Nov 25 09:27:52 crc kubenswrapper[4687]: I1125 09:27:52.025742 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 09:27:52 crc kubenswrapper[4687]: I1125 09:27:52.025956 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 09:27:52 crc kubenswrapper[4687]: I1125 09:27:52.026267 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-44nct" Nov 25 09:27:52 crc kubenswrapper[4687]: I1125 09:27:52.026380 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 09:27:52 crc kubenswrapper[4687]: I1125 09:27:52.034639 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-xp2dr"] Nov 25 09:27:52 crc kubenswrapper[4687]: I1125 09:27:52.172337 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0d29aa5d-d14b-4a11-9929-84c1770afb05-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xp2dr\" (UID: \"0d29aa5d-d14b-4a11-9929-84c1770afb05\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xp2dr" Nov 25 09:27:52 crc kubenswrapper[4687]: I1125 09:27:52.172470 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ldbtv\" (UniqueName: \"kubernetes.io/projected/0d29aa5d-d14b-4a11-9929-84c1770afb05-kube-api-access-ldbtv\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xp2dr\" (UID: \"0d29aa5d-d14b-4a11-9929-84c1770afb05\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xp2dr" Nov 25 09:27:52 crc kubenswrapper[4687]: I1125 09:27:52.172682 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0d29aa5d-d14b-4a11-9929-84c1770afb05-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xp2dr\" (UID: \"0d29aa5d-d14b-4a11-9929-84c1770afb05\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xp2dr" Nov 25 09:27:52 crc kubenswrapper[4687]: I1125 09:27:52.275057 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ldbtv\" (UniqueName: \"kubernetes.io/projected/0d29aa5d-d14b-4a11-9929-84c1770afb05-kube-api-access-ldbtv\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xp2dr\" (UID: \"0d29aa5d-d14b-4a11-9929-84c1770afb05\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xp2dr" Nov 25 09:27:52 crc kubenswrapper[4687]: I1125 09:27:52.275121 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0d29aa5d-d14b-4a11-9929-84c1770afb05-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xp2dr\" (UID: \"0d29aa5d-d14b-4a11-9929-84c1770afb05\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xp2dr" Nov 25 09:27:52 crc kubenswrapper[4687]: I1125 09:27:52.275248 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0d29aa5d-d14b-4a11-9929-84c1770afb05-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xp2dr\" (UID: \"0d29aa5d-d14b-4a11-9929-84c1770afb05\") " 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xp2dr" Nov 25 09:27:52 crc kubenswrapper[4687]: I1125 09:27:52.279455 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0d29aa5d-d14b-4a11-9929-84c1770afb05-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xp2dr\" (UID: \"0d29aa5d-d14b-4a11-9929-84c1770afb05\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xp2dr" Nov 25 09:27:52 crc kubenswrapper[4687]: I1125 09:27:52.280326 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0d29aa5d-d14b-4a11-9929-84c1770afb05-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xp2dr\" (UID: \"0d29aa5d-d14b-4a11-9929-84c1770afb05\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xp2dr" Nov 25 09:27:52 crc kubenswrapper[4687]: I1125 09:27:52.303607 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ldbtv\" (UniqueName: \"kubernetes.io/projected/0d29aa5d-d14b-4a11-9929-84c1770afb05-kube-api-access-ldbtv\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xp2dr\" (UID: \"0d29aa5d-d14b-4a11-9929-84c1770afb05\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xp2dr" Nov 25 09:27:52 crc kubenswrapper[4687]: I1125 09:27:52.341652 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xp2dr" Nov 25 09:27:52 crc kubenswrapper[4687]: I1125 09:27:52.835218 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-xp2dr"] Nov 25 09:27:52 crc kubenswrapper[4687]: I1125 09:27:52.960895 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xp2dr" event={"ID":"0d29aa5d-d14b-4a11-9929-84c1770afb05","Type":"ContainerStarted","Data":"f9a1149b122d8fe8d21849ae2c518173211f40b4411aa870407b280d299a71f9"} Nov 25 09:27:53 crc kubenswrapper[4687]: I1125 09:27:53.844469 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:27:53 crc kubenswrapper[4687]: I1125 09:27:53.845143 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:27:53 crc kubenswrapper[4687]: I1125 09:27:53.845197 4687 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:27:53 crc kubenswrapper[4687]: I1125 09:27:53.845958 4687 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"026b07db34bc04118ebe444d3596042b6afeddebbaa71ea4729e8d639abf5885"} pod="openshift-machine-config-operator/machine-config-daemon-vcqct" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:27:53 crc kubenswrapper[4687]: I1125 09:27:53.846010 4687 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" containerID="cri-o://026b07db34bc04118ebe444d3596042b6afeddebbaa71ea4729e8d639abf5885" gracePeriod=600 Nov 25 09:27:53 crc kubenswrapper[4687]: I1125 09:27:53.976604 4687 generic.go:334] "Generic (PLEG): container finished" podID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerID="026b07db34bc04118ebe444d3596042b6afeddebbaa71ea4729e8d639abf5885" exitCode=0 Nov 25 09:27:53 crc kubenswrapper[4687]: I1125 09:27:53.976793 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerDied","Data":"026b07db34bc04118ebe444d3596042b6afeddebbaa71ea4729e8d639abf5885"} Nov 25 09:27:53 crc kubenswrapper[4687]: I1125 09:27:53.976895 4687 scope.go:117] "RemoveContainer" containerID="0e89ebd4720ffd5c135c8bc00be72ce7345dd6f93bd878517e70d876f94fe463" Nov 25 09:27:53 crc kubenswrapper[4687]: I1125 09:27:53.979254 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xp2dr" event={"ID":"0d29aa5d-d14b-4a11-9929-84c1770afb05","Type":"ContainerStarted","Data":"79a83c22f9d51a9a77687a96726f5ca3286a7dc819100c12efb042954adaee13"} Nov 25 09:27:54 crc kubenswrapper[4687]: I1125 09:27:54.007725 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xp2dr" podStartSLOduration=2.397281832 podStartE2EDuration="3.007705261s" podCreationTimestamp="2025-11-25 09:27:51 +0000 UTC" firstStartedPulling="2025-11-25 09:27:52.845683229 +0000 UTC m=+1467.899322937" lastFinishedPulling="2025-11-25 09:27:53.456106638 +0000 UTC m=+1468.509746366" observedRunningTime="2025-11-25 09:27:54.003406534 +0000 UTC m=+1469.057046252" watchObservedRunningTime="2025-11-25 09:27:54.007705261 +0000 UTC m=+1469.061344979" Nov 25 09:27:55 crc kubenswrapper[4687]: I1125 09:27:55.000758 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerStarted","Data":"8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895"} Nov 25 09:27:55 crc kubenswrapper[4687]: I1125 09:27:55.704125 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:27:56 crc kubenswrapper[4687]: I1125 09:27:56.816982 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 25 09:27:57 crc kubenswrapper[4687]: I1125 09:27:57.019621 4687 generic.go:334] "Generic (PLEG): container finished" podID="0d29aa5d-d14b-4a11-9929-84c1770afb05" containerID="79a83c22f9d51a9a77687a96726f5ca3286a7dc819100c12efb042954adaee13" exitCode=0 Nov 25 09:27:57 crc kubenswrapper[4687]: I1125 09:27:57.019671 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xp2dr" event={"ID":"0d29aa5d-d14b-4a11-9929-84c1770afb05","Type":"ContainerDied","Data":"79a83c22f9d51a9a77687a96726f5ca3286a7dc819100c12efb042954adaee13"} Nov 25 09:27:58 crc kubenswrapper[4687]: I1125 09:27:58.444905 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xp2dr" Nov 25 09:27:58 crc kubenswrapper[4687]: I1125 09:27:58.505166 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ldbtv\" (UniqueName: \"kubernetes.io/projected/0d29aa5d-d14b-4a11-9929-84c1770afb05-kube-api-access-ldbtv\") pod \"0d29aa5d-d14b-4a11-9929-84c1770afb05\" (UID: \"0d29aa5d-d14b-4a11-9929-84c1770afb05\") " Nov 25 09:27:58 crc kubenswrapper[4687]: I1125 09:27:58.505318 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0d29aa5d-d14b-4a11-9929-84c1770afb05-inventory\") pod \"0d29aa5d-d14b-4a11-9929-84c1770afb05\" (UID: \"0d29aa5d-d14b-4a11-9929-84c1770afb05\") " Nov 25 09:27:58 crc kubenswrapper[4687]: I1125 09:27:58.505383 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0d29aa5d-d14b-4a11-9929-84c1770afb05-ssh-key\") pod \"0d29aa5d-d14b-4a11-9929-84c1770afb05\" (UID: \"0d29aa5d-d14b-4a11-9929-84c1770afb05\") " Nov 25 09:27:58 crc kubenswrapper[4687]: I1125 09:27:58.518772 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d29aa5d-d14b-4a11-9929-84c1770afb05-kube-api-access-ldbtv" (OuterVolumeSpecName: "kube-api-access-ldbtv") pod "0d29aa5d-d14b-4a11-9929-84c1770afb05" (UID: "0d29aa5d-d14b-4a11-9929-84c1770afb05"). InnerVolumeSpecName "kube-api-access-ldbtv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:27:58 crc kubenswrapper[4687]: I1125 09:27:58.534603 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d29aa5d-d14b-4a11-9929-84c1770afb05-inventory" (OuterVolumeSpecName: "inventory") pod "0d29aa5d-d14b-4a11-9929-84c1770afb05" (UID: "0d29aa5d-d14b-4a11-9929-84c1770afb05"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:27:58 crc kubenswrapper[4687]: I1125 09:27:58.537861 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d29aa5d-d14b-4a11-9929-84c1770afb05-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0d29aa5d-d14b-4a11-9929-84c1770afb05" (UID: "0d29aa5d-d14b-4a11-9929-84c1770afb05"). InnerVolumeSpecName "ssh-key". 
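Annotation: the MountVolume entries at 09:27:52 and the UnmountVolume entries at 09:27:58 above are two sides of one reconcile loop — kubelet diffs the desired volume set (from pod specs) against what is actually mounted. A toy sketch of that diff, not kubelet's real reconciler; volume names are taken from the xp2dr pod entries above, at the point where the pod has been deleted and nothing is desired anymore:

package main

import "fmt"

func main() {
	// Desired world (pod specs) vs actual world (mounted volumes).
	desired := map[string]bool{} // pod deleted: nothing desired
	actual := map[string]bool{
		"ssh-key": true, "inventory": true, "kube-api-access-ldbtv": true,
	}

	for v := range desired {
		if !actual[v] {
			fmt.Println("operationExecutor.MountVolume started for", v)
		}
	}
	for v := range actual {
		if !desired[v] {
			fmt.Println("operationExecutor.UnmountVolume started for", v)
		}
	}
}
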
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:27:58 crc kubenswrapper[4687]: I1125 09:27:58.607433 4687 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0d29aa5d-d14b-4a11-9929-84c1770afb05-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:58 crc kubenswrapper[4687]: I1125 09:27:58.607466 4687 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0d29aa5d-d14b-4a11-9929-84c1770afb05-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:58 crc kubenswrapper[4687]: I1125 09:27:58.607476 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ldbtv\" (UniqueName: \"kubernetes.io/projected/0d29aa5d-d14b-4a11-9929-84c1770afb05-kube-api-access-ldbtv\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.046157 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xp2dr" event={"ID":"0d29aa5d-d14b-4a11-9929-84c1770afb05","Type":"ContainerDied","Data":"f9a1149b122d8fe8d21849ae2c518173211f40b4411aa870407b280d299a71f9"} Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.046205 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xp2dr" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.046241 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f9a1149b122d8fe8d21849ae2c518173211f40b4411aa870407b280d299a71f9" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.147706 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg"] Nov 25 09:27:59 crc kubenswrapper[4687]: E1125 09:27:59.150605 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d29aa5d-d14b-4a11-9929-84c1770afb05" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.150635 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d29aa5d-d14b-4a11-9929-84c1770afb05" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.150979 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d29aa5d-d14b-4a11-9929-84c1770afb05" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.151783 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.154100 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-44nct" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.154627 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.154714 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.157171 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.163216 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg"] Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.220582 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/34a409ae-58d8-4746-83e8-f93d0e449216-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg\" (UID: \"34a409ae-58d8-4746-83e8-f93d0e449216\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.220650 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/34a409ae-58d8-4746-83e8-f93d0e449216-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg\" (UID: \"34a409ae-58d8-4746-83e8-f93d0e449216\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.220732 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34a409ae-58d8-4746-83e8-f93d0e449216-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg\" (UID: \"34a409ae-58d8-4746-83e8-f93d0e449216\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.220809 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f69cm\" (UniqueName: \"kubernetes.io/projected/34a409ae-58d8-4746-83e8-f93d0e449216-kube-api-access-f69cm\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg\" (UID: \"34a409ae-58d8-4746-83e8-f93d0e449216\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.322833 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/34a409ae-58d8-4746-83e8-f93d0e449216-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg\" (UID: \"34a409ae-58d8-4746-83e8-f93d0e449216\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.322961 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/34a409ae-58d8-4746-83e8-f93d0e449216-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg\" (UID: 
\"34a409ae-58d8-4746-83e8-f93d0e449216\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.323045 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34a409ae-58d8-4746-83e8-f93d0e449216-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg\" (UID: \"34a409ae-58d8-4746-83e8-f93d0e449216\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.323126 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f69cm\" (UniqueName: \"kubernetes.io/projected/34a409ae-58d8-4746-83e8-f93d0e449216-kube-api-access-f69cm\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg\" (UID: \"34a409ae-58d8-4746-83e8-f93d0e449216\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.329786 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/34a409ae-58d8-4746-83e8-f93d0e449216-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg\" (UID: \"34a409ae-58d8-4746-83e8-f93d0e449216\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.330311 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34a409ae-58d8-4746-83e8-f93d0e449216-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg\" (UID: \"34a409ae-58d8-4746-83e8-f93d0e449216\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.331132 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/34a409ae-58d8-4746-83e8-f93d0e449216-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg\" (UID: \"34a409ae-58d8-4746-83e8-f93d0e449216\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.347667 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f69cm\" (UniqueName: \"kubernetes.io/projected/34a409ae-58d8-4746-83e8-f93d0e449216-kube-api-access-f69cm\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg\" (UID: \"34a409ae-58d8-4746-83e8-f93d0e449216\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg" Nov 25 09:27:59 crc kubenswrapper[4687]: I1125 09:27:59.474897 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg" Nov 25 09:28:00 crc kubenswrapper[4687]: I1125 09:28:00.046238 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg"] Nov 25 09:28:01 crc kubenswrapper[4687]: I1125 09:28:01.074609 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg" event={"ID":"34a409ae-58d8-4746-83e8-f93d0e449216","Type":"ContainerStarted","Data":"88cacfc66364164d5a82446240b654cbcb8009c01462a8de2db4de61c41096ed"} Nov 25 09:28:01 crc kubenswrapper[4687]: I1125 09:28:01.074960 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg" event={"ID":"34a409ae-58d8-4746-83e8-f93d0e449216","Type":"ContainerStarted","Data":"978b10c4558d81c08034cef7fd33c7661e8ddd0fa292dcb8c83e0455153c0808"} Nov 25 09:28:01 crc kubenswrapper[4687]: I1125 09:28:01.104998 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg" podStartSLOduration=1.498442093 podStartE2EDuration="2.104973077s" podCreationTimestamp="2025-11-25 09:27:59 +0000 UTC" firstStartedPulling="2025-11-25 09:28:00.057891066 +0000 UTC m=+1475.111530784" lastFinishedPulling="2025-11-25 09:28:00.664422 +0000 UTC m=+1475.718061768" observedRunningTime="2025-11-25 09:28:01.095176161 +0000 UTC m=+1476.148815879" watchObservedRunningTime="2025-11-25 09:28:01.104973077 +0000 UTC m=+1476.158612825" Nov 25 09:28:12 crc kubenswrapper[4687]: I1125 09:28:12.485660 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mmkf2"] Nov 25 09:28:12 crc kubenswrapper[4687]: I1125 09:28:12.488171 4687 util.go:30] "No sandbox for pod can be found. 
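Annotation: the "Observed pod startup duration" entries above are internally consistent — podStartSLOduration is the end-to-end duration minus the time spent pulling images, computed on the monotonic (m=) clock. Checking the bootstrap pod's numbers from the entry above:

package main

import "fmt"

func main() {
	// Monotonic m= offsets and durations from the bootstrap pod's
	// "Observed pod startup duration" entry, in seconds.
	firstStartedPulling := 1475.111530784
	lastFinishedPulling := 1475.718061768
	e2e := 2.104973077 // podStartE2EDuration

	pull := lastFinishedPulling - firstStartedPulling
	slo := e2e - pull // startup latency excluding image pulls
	fmt.Printf("pull=%.9fs slo=%.9fs (logged podStartSLOduration=1.498442093)\n",
		pull, slo)
}

The same subtraction reproduces the xp2dr pod's 2.397281832 from its 3.007705261s end-to-end figure at 09:27:54.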
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mmkf2" Nov 25 09:28:12 crc kubenswrapper[4687]: I1125 09:28:12.496390 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mmkf2"] Nov 25 09:28:12 crc kubenswrapper[4687]: I1125 09:28:12.584596 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7c5b6d0-c261-49ff-9a39-b51269958eab-catalog-content\") pod \"redhat-marketplace-mmkf2\" (UID: \"a7c5b6d0-c261-49ff-9a39-b51269958eab\") " pod="openshift-marketplace/redhat-marketplace-mmkf2" Nov 25 09:28:12 crc kubenswrapper[4687]: I1125 09:28:12.584659 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7c5b6d0-c261-49ff-9a39-b51269958eab-utilities\") pod \"redhat-marketplace-mmkf2\" (UID: \"a7c5b6d0-c261-49ff-9a39-b51269958eab\") " pod="openshift-marketplace/redhat-marketplace-mmkf2" Nov 25 09:28:12 crc kubenswrapper[4687]: I1125 09:28:12.584807 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bsb4h\" (UniqueName: \"kubernetes.io/projected/a7c5b6d0-c261-49ff-9a39-b51269958eab-kube-api-access-bsb4h\") pod \"redhat-marketplace-mmkf2\" (UID: \"a7c5b6d0-c261-49ff-9a39-b51269958eab\") " pod="openshift-marketplace/redhat-marketplace-mmkf2" Nov 25 09:28:12 crc kubenswrapper[4687]: I1125 09:28:12.687301 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7c5b6d0-c261-49ff-9a39-b51269958eab-catalog-content\") pod \"redhat-marketplace-mmkf2\" (UID: \"a7c5b6d0-c261-49ff-9a39-b51269958eab\") " pod="openshift-marketplace/redhat-marketplace-mmkf2" Nov 25 09:28:12 crc kubenswrapper[4687]: I1125 09:28:12.687381 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7c5b6d0-c261-49ff-9a39-b51269958eab-utilities\") pod \"redhat-marketplace-mmkf2\" (UID: \"a7c5b6d0-c261-49ff-9a39-b51269958eab\") " pod="openshift-marketplace/redhat-marketplace-mmkf2" Nov 25 09:28:12 crc kubenswrapper[4687]: I1125 09:28:12.687463 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bsb4h\" (UniqueName: \"kubernetes.io/projected/a7c5b6d0-c261-49ff-9a39-b51269958eab-kube-api-access-bsb4h\") pod \"redhat-marketplace-mmkf2\" (UID: \"a7c5b6d0-c261-49ff-9a39-b51269958eab\") " pod="openshift-marketplace/redhat-marketplace-mmkf2" Nov 25 09:28:12 crc kubenswrapper[4687]: I1125 09:28:12.687919 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7c5b6d0-c261-49ff-9a39-b51269958eab-catalog-content\") pod \"redhat-marketplace-mmkf2\" (UID: \"a7c5b6d0-c261-49ff-9a39-b51269958eab\") " pod="openshift-marketplace/redhat-marketplace-mmkf2" Nov 25 09:28:12 crc kubenswrapper[4687]: I1125 09:28:12.688006 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7c5b6d0-c261-49ff-9a39-b51269958eab-utilities\") pod \"redhat-marketplace-mmkf2\" (UID: \"a7c5b6d0-c261-49ff-9a39-b51269958eab\") " pod="openshift-marketplace/redhat-marketplace-mmkf2" Nov 25 09:28:12 crc kubenswrapper[4687]: I1125 09:28:12.710184 4687 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-bsb4h\" (UniqueName: \"kubernetes.io/projected/a7c5b6d0-c261-49ff-9a39-b51269958eab-kube-api-access-bsb4h\") pod \"redhat-marketplace-mmkf2\" (UID: \"a7c5b6d0-c261-49ff-9a39-b51269958eab\") " pod="openshift-marketplace/redhat-marketplace-mmkf2" Nov 25 09:28:12 crc kubenswrapper[4687]: I1125 09:28:12.850402 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mmkf2" Nov 25 09:28:14 crc kubenswrapper[4687]: I1125 09:28:13.345416 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mmkf2"] Nov 25 09:28:14 crc kubenswrapper[4687]: I1125 09:28:14.203658 4687 generic.go:334] "Generic (PLEG): container finished" podID="a7c5b6d0-c261-49ff-9a39-b51269958eab" containerID="495c9d882679b615a30b54ac2da55bd2c62cfc09a2e8b1598ec0458dfbe91fbf" exitCode=0 Nov 25 09:28:14 crc kubenswrapper[4687]: I1125 09:28:14.203715 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mmkf2" event={"ID":"a7c5b6d0-c261-49ff-9a39-b51269958eab","Type":"ContainerDied","Data":"495c9d882679b615a30b54ac2da55bd2c62cfc09a2e8b1598ec0458dfbe91fbf"} Nov 25 09:28:14 crc kubenswrapper[4687]: I1125 09:28:14.203966 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mmkf2" event={"ID":"a7c5b6d0-c261-49ff-9a39-b51269958eab","Type":"ContainerStarted","Data":"cbd1888601a23fc6effa71c026cff425fc043c9a781358e31947c6dbe8b275ae"} Nov 25 09:28:15 crc kubenswrapper[4687]: I1125 09:28:15.215857 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mmkf2" event={"ID":"a7c5b6d0-c261-49ff-9a39-b51269958eab","Type":"ContainerStarted","Data":"753bb7edf9d67bbe4bd6551a789c8308b03770fe256dfe00d9b6cbeaf4facd6d"} Nov 25 09:28:16 crc kubenswrapper[4687]: I1125 09:28:16.226521 4687 generic.go:334] "Generic (PLEG): container finished" podID="a7c5b6d0-c261-49ff-9a39-b51269958eab" containerID="753bb7edf9d67bbe4bd6551a789c8308b03770fe256dfe00d9b6cbeaf4facd6d" exitCode=0 Nov 25 09:28:16 crc kubenswrapper[4687]: I1125 09:28:16.226564 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mmkf2" event={"ID":"a7c5b6d0-c261-49ff-9a39-b51269958eab","Type":"ContainerDied","Data":"753bb7edf9d67bbe4bd6551a789c8308b03770fe256dfe00d9b6cbeaf4facd6d"} Nov 25 09:28:17 crc kubenswrapper[4687]: I1125 09:28:17.236774 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mmkf2" event={"ID":"a7c5b6d0-c261-49ff-9a39-b51269958eab","Type":"ContainerStarted","Data":"0c287642e5bd34b3711f11afacf63bab263a70f2da7150ce9609ed3903ed09e5"} Nov 25 09:28:17 crc kubenswrapper[4687]: I1125 09:28:17.255184 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mmkf2" podStartSLOduration=2.731182114 podStartE2EDuration="5.255165501s" podCreationTimestamp="2025-11-25 09:28:12 +0000 UTC" firstStartedPulling="2025-11-25 09:28:14.205004502 +0000 UTC m=+1489.258644220" lastFinishedPulling="2025-11-25 09:28:16.728987889 +0000 UTC m=+1491.782627607" observedRunningTime="2025-11-25 09:28:17.252003725 +0000 UTC m=+1492.305643463" watchObservedRunningTime="2025-11-25 09:28:17.255165501 +0000 UTC m=+1492.308805219" Nov 25 09:28:22 crc kubenswrapper[4687]: I1125 09:28:22.850824 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-mmkf2" Nov 25 09:28:22 crc kubenswrapper[4687]: I1125 09:28:22.851449 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mmkf2" Nov 25 09:28:22 crc kubenswrapper[4687]: I1125 09:28:22.892486 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mmkf2" Nov 25 09:28:23 crc kubenswrapper[4687]: I1125 09:28:23.351654 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mmkf2" Nov 25 09:28:23 crc kubenswrapper[4687]: I1125 09:28:23.398192 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mmkf2"] Nov 25 09:28:25 crc kubenswrapper[4687]: I1125 09:28:25.321656 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mmkf2" podUID="a7c5b6d0-c261-49ff-9a39-b51269958eab" containerName="registry-server" containerID="cri-o://0c287642e5bd34b3711f11afacf63bab263a70f2da7150ce9609ed3903ed09e5" gracePeriod=2 Nov 25 09:28:25 crc kubenswrapper[4687]: I1125 09:28:25.765466 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mmkf2" Nov 25 09:28:25 crc kubenswrapper[4687]: I1125 09:28:25.861214 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7c5b6d0-c261-49ff-9a39-b51269958eab-catalog-content\") pod \"a7c5b6d0-c261-49ff-9a39-b51269958eab\" (UID: \"a7c5b6d0-c261-49ff-9a39-b51269958eab\") " Nov 25 09:28:25 crc kubenswrapper[4687]: I1125 09:28:25.861368 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bsb4h\" (UniqueName: \"kubernetes.io/projected/a7c5b6d0-c261-49ff-9a39-b51269958eab-kube-api-access-bsb4h\") pod \"a7c5b6d0-c261-49ff-9a39-b51269958eab\" (UID: \"a7c5b6d0-c261-49ff-9a39-b51269958eab\") " Nov 25 09:28:25 crc kubenswrapper[4687]: I1125 09:28:25.861536 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7c5b6d0-c261-49ff-9a39-b51269958eab-utilities\") pod \"a7c5b6d0-c261-49ff-9a39-b51269958eab\" (UID: \"a7c5b6d0-c261-49ff-9a39-b51269958eab\") " Nov 25 09:28:25 crc kubenswrapper[4687]: I1125 09:28:25.862354 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7c5b6d0-c261-49ff-9a39-b51269958eab-utilities" (OuterVolumeSpecName: "utilities") pod "a7c5b6d0-c261-49ff-9a39-b51269958eab" (UID: "a7c5b6d0-c261-49ff-9a39-b51269958eab"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:28:25 crc kubenswrapper[4687]: I1125 09:28:25.872791 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7c5b6d0-c261-49ff-9a39-b51269958eab-kube-api-access-bsb4h" (OuterVolumeSpecName: "kube-api-access-bsb4h") pod "a7c5b6d0-c261-49ff-9a39-b51269958eab" (UID: "a7c5b6d0-c261-49ff-9a39-b51269958eab"). InnerVolumeSpecName "kube-api-access-bsb4h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:25 crc kubenswrapper[4687]: I1125 09:28:25.888919 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7c5b6d0-c261-49ff-9a39-b51269958eab-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a7c5b6d0-c261-49ff-9a39-b51269958eab" (UID: "a7c5b6d0-c261-49ff-9a39-b51269958eab"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:28:25 crc kubenswrapper[4687]: I1125 09:28:25.963786 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7c5b6d0-c261-49ff-9a39-b51269958eab-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:25 crc kubenswrapper[4687]: I1125 09:28:25.963833 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bsb4h\" (UniqueName: \"kubernetes.io/projected/a7c5b6d0-c261-49ff-9a39-b51269958eab-kube-api-access-bsb4h\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:25 crc kubenswrapper[4687]: I1125 09:28:25.963848 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7c5b6d0-c261-49ff-9a39-b51269958eab-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:26 crc kubenswrapper[4687]: I1125 09:28:26.333435 4687 generic.go:334] "Generic (PLEG): container finished" podID="a7c5b6d0-c261-49ff-9a39-b51269958eab" containerID="0c287642e5bd34b3711f11afacf63bab263a70f2da7150ce9609ed3903ed09e5" exitCode=0 Nov 25 09:28:26 crc kubenswrapper[4687]: I1125 09:28:26.333518 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mmkf2" event={"ID":"a7c5b6d0-c261-49ff-9a39-b51269958eab","Type":"ContainerDied","Data":"0c287642e5bd34b3711f11afacf63bab263a70f2da7150ce9609ed3903ed09e5"} Nov 25 09:28:26 crc kubenswrapper[4687]: I1125 09:28:26.333587 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mmkf2" event={"ID":"a7c5b6d0-c261-49ff-9a39-b51269958eab","Type":"ContainerDied","Data":"cbd1888601a23fc6effa71c026cff425fc043c9a781358e31947c6dbe8b275ae"} Nov 25 09:28:26 crc kubenswrapper[4687]: I1125 09:28:26.333538 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mmkf2" Nov 25 09:28:26 crc kubenswrapper[4687]: I1125 09:28:26.333610 4687 scope.go:117] "RemoveContainer" containerID="0c287642e5bd34b3711f11afacf63bab263a70f2da7150ce9609ed3903ed09e5" Nov 25 09:28:26 crc kubenswrapper[4687]: I1125 09:28:26.360372 4687 scope.go:117] "RemoveContainer" containerID="753bb7edf9d67bbe4bd6551a789c8308b03770fe256dfe00d9b6cbeaf4facd6d" Nov 25 09:28:26 crc kubenswrapper[4687]: I1125 09:28:26.371700 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mmkf2"] Nov 25 09:28:26 crc kubenswrapper[4687]: I1125 09:28:26.381548 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mmkf2"] Nov 25 09:28:26 crc kubenswrapper[4687]: I1125 09:28:26.392758 4687 scope.go:117] "RemoveContainer" containerID="495c9d882679b615a30b54ac2da55bd2c62cfc09a2e8b1598ec0458dfbe91fbf" Nov 25 09:28:26 crc kubenswrapper[4687]: I1125 09:28:26.433107 4687 scope.go:117] "RemoveContainer" containerID="0c287642e5bd34b3711f11afacf63bab263a70f2da7150ce9609ed3903ed09e5" Nov 25 09:28:26 crc kubenswrapper[4687]: E1125 09:28:26.433575 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c287642e5bd34b3711f11afacf63bab263a70f2da7150ce9609ed3903ed09e5\": container with ID starting with 0c287642e5bd34b3711f11afacf63bab263a70f2da7150ce9609ed3903ed09e5 not found: ID does not exist" containerID="0c287642e5bd34b3711f11afacf63bab263a70f2da7150ce9609ed3903ed09e5" Nov 25 09:28:26 crc kubenswrapper[4687]: I1125 09:28:26.433617 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c287642e5bd34b3711f11afacf63bab263a70f2da7150ce9609ed3903ed09e5"} err="failed to get container status \"0c287642e5bd34b3711f11afacf63bab263a70f2da7150ce9609ed3903ed09e5\": rpc error: code = NotFound desc = could not find container \"0c287642e5bd34b3711f11afacf63bab263a70f2da7150ce9609ed3903ed09e5\": container with ID starting with 0c287642e5bd34b3711f11afacf63bab263a70f2da7150ce9609ed3903ed09e5 not found: ID does not exist" Nov 25 09:28:26 crc kubenswrapper[4687]: I1125 09:28:26.433645 4687 scope.go:117] "RemoveContainer" containerID="753bb7edf9d67bbe4bd6551a789c8308b03770fe256dfe00d9b6cbeaf4facd6d" Nov 25 09:28:26 crc kubenswrapper[4687]: E1125 09:28:26.433995 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"753bb7edf9d67bbe4bd6551a789c8308b03770fe256dfe00d9b6cbeaf4facd6d\": container with ID starting with 753bb7edf9d67bbe4bd6551a789c8308b03770fe256dfe00d9b6cbeaf4facd6d not found: ID does not exist" containerID="753bb7edf9d67bbe4bd6551a789c8308b03770fe256dfe00d9b6cbeaf4facd6d" Nov 25 09:28:26 crc kubenswrapper[4687]: I1125 09:28:26.434030 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"753bb7edf9d67bbe4bd6551a789c8308b03770fe256dfe00d9b6cbeaf4facd6d"} err="failed to get container status \"753bb7edf9d67bbe4bd6551a789c8308b03770fe256dfe00d9b6cbeaf4facd6d\": rpc error: code = NotFound desc = could not find container \"753bb7edf9d67bbe4bd6551a789c8308b03770fe256dfe00d9b6cbeaf4facd6d\": container with ID starting with 753bb7edf9d67bbe4bd6551a789c8308b03770fe256dfe00d9b6cbeaf4facd6d not found: ID does not exist" Nov 25 09:28:26 crc kubenswrapper[4687]: I1125 09:28:26.434055 4687 scope.go:117] "RemoveContainer" 
containerID="495c9d882679b615a30b54ac2da55bd2c62cfc09a2e8b1598ec0458dfbe91fbf" Nov 25 09:28:26 crc kubenswrapper[4687]: E1125 09:28:26.434263 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"495c9d882679b615a30b54ac2da55bd2c62cfc09a2e8b1598ec0458dfbe91fbf\": container with ID starting with 495c9d882679b615a30b54ac2da55bd2c62cfc09a2e8b1598ec0458dfbe91fbf not found: ID does not exist" containerID="495c9d882679b615a30b54ac2da55bd2c62cfc09a2e8b1598ec0458dfbe91fbf" Nov 25 09:28:26 crc kubenswrapper[4687]: I1125 09:28:26.434292 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"495c9d882679b615a30b54ac2da55bd2c62cfc09a2e8b1598ec0458dfbe91fbf"} err="failed to get container status \"495c9d882679b615a30b54ac2da55bd2c62cfc09a2e8b1598ec0458dfbe91fbf\": rpc error: code = NotFound desc = could not find container \"495c9d882679b615a30b54ac2da55bd2c62cfc09a2e8b1598ec0458dfbe91fbf\": container with ID starting with 495c9d882679b615a30b54ac2da55bd2c62cfc09a2e8b1598ec0458dfbe91fbf not found: ID does not exist" Nov 25 09:28:27 crc kubenswrapper[4687]: I1125 09:28:27.746265 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7c5b6d0-c261-49ff-9a39-b51269958eab" path="/var/lib/kubelet/pods/a7c5b6d0-c261-49ff-9a39-b51269958eab/volumes" Nov 25 09:28:35 crc kubenswrapper[4687]: I1125 09:28:35.415563 4687 scope.go:117] "RemoveContainer" containerID="008cef6255ef85911a6ffa5ef19ef4d3aef603586a8c0f4d6bd7a6b2ac9b0835" Nov 25 09:29:35 crc kubenswrapper[4687]: I1125 09:29:35.498191 4687 scope.go:117] "RemoveContainer" containerID="dfc8332f355812154f4cd88cae810d7278e3be5e5005c77d3039c8bda72bab8c" Nov 25 09:30:00 crc kubenswrapper[4687]: I1125 09:30:00.174178 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401050-crkvm"] Nov 25 09:30:00 crc kubenswrapper[4687]: E1125 09:30:00.175319 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7c5b6d0-c261-49ff-9a39-b51269958eab" containerName="registry-server" Nov 25 09:30:00 crc kubenswrapper[4687]: I1125 09:30:00.175342 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7c5b6d0-c261-49ff-9a39-b51269958eab" containerName="registry-server" Nov 25 09:30:00 crc kubenswrapper[4687]: E1125 09:30:00.175357 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7c5b6d0-c261-49ff-9a39-b51269958eab" containerName="extract-content" Nov 25 09:30:00 crc kubenswrapper[4687]: I1125 09:30:00.175366 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7c5b6d0-c261-49ff-9a39-b51269958eab" containerName="extract-content" Nov 25 09:30:00 crc kubenswrapper[4687]: E1125 09:30:00.175423 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7c5b6d0-c261-49ff-9a39-b51269958eab" containerName="extract-utilities" Nov 25 09:30:00 crc kubenswrapper[4687]: I1125 09:30:00.175439 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7c5b6d0-c261-49ff-9a39-b51269958eab" containerName="extract-utilities" Nov 25 09:30:00 crc kubenswrapper[4687]: I1125 09:30:00.175743 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7c5b6d0-c261-49ff-9a39-b51269958eab" containerName="registry-server" Nov 25 09:30:00 crc kubenswrapper[4687]: I1125 09:30:00.176764 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-crkvm" Nov 25 09:30:00 crc kubenswrapper[4687]: I1125 09:30:00.180936 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 09:30:00 crc kubenswrapper[4687]: I1125 09:30:00.181404 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 09:30:00 crc kubenswrapper[4687]: I1125 09:30:00.204708 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401050-crkvm"] Nov 25 09:30:00 crc kubenswrapper[4687]: I1125 09:30:00.311096 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6e34247b-6e99-4afd-988f-a22d68fd3858-config-volume\") pod \"collect-profiles-29401050-crkvm\" (UID: \"6e34247b-6e99-4afd-988f-a22d68fd3858\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-crkvm" Nov 25 09:30:00 crc kubenswrapper[4687]: I1125 09:30:00.311165 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65l26\" (UniqueName: \"kubernetes.io/projected/6e34247b-6e99-4afd-988f-a22d68fd3858-kube-api-access-65l26\") pod \"collect-profiles-29401050-crkvm\" (UID: \"6e34247b-6e99-4afd-988f-a22d68fd3858\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-crkvm" Nov 25 09:30:00 crc kubenswrapper[4687]: I1125 09:30:00.311377 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6e34247b-6e99-4afd-988f-a22d68fd3858-secret-volume\") pod \"collect-profiles-29401050-crkvm\" (UID: \"6e34247b-6e99-4afd-988f-a22d68fd3858\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-crkvm" Nov 25 09:30:00 crc kubenswrapper[4687]: I1125 09:30:00.413755 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6e34247b-6e99-4afd-988f-a22d68fd3858-config-volume\") pod \"collect-profiles-29401050-crkvm\" (UID: \"6e34247b-6e99-4afd-988f-a22d68fd3858\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-crkvm" Nov 25 09:30:00 crc kubenswrapper[4687]: I1125 09:30:00.413817 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65l26\" (UniqueName: \"kubernetes.io/projected/6e34247b-6e99-4afd-988f-a22d68fd3858-kube-api-access-65l26\") pod \"collect-profiles-29401050-crkvm\" (UID: \"6e34247b-6e99-4afd-988f-a22d68fd3858\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-crkvm" Nov 25 09:30:00 crc kubenswrapper[4687]: I1125 09:30:00.413916 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6e34247b-6e99-4afd-988f-a22d68fd3858-secret-volume\") pod \"collect-profiles-29401050-crkvm\" (UID: \"6e34247b-6e99-4afd-988f-a22d68fd3858\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-crkvm" Nov 25 09:30:00 crc kubenswrapper[4687]: I1125 09:30:00.414836 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6e34247b-6e99-4afd-988f-a22d68fd3858-config-volume\") pod 
\"collect-profiles-29401050-crkvm\" (UID: \"6e34247b-6e99-4afd-988f-a22d68fd3858\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-crkvm" Nov 25 09:30:00 crc kubenswrapper[4687]: I1125 09:30:00.420856 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6e34247b-6e99-4afd-988f-a22d68fd3858-secret-volume\") pod \"collect-profiles-29401050-crkvm\" (UID: \"6e34247b-6e99-4afd-988f-a22d68fd3858\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-crkvm" Nov 25 09:30:00 crc kubenswrapper[4687]: I1125 09:30:00.433063 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65l26\" (UniqueName: \"kubernetes.io/projected/6e34247b-6e99-4afd-988f-a22d68fd3858-kube-api-access-65l26\") pod \"collect-profiles-29401050-crkvm\" (UID: \"6e34247b-6e99-4afd-988f-a22d68fd3858\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-crkvm" Nov 25 09:30:00 crc kubenswrapper[4687]: I1125 09:30:00.536057 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-crkvm" Nov 25 09:30:01 crc kubenswrapper[4687]: I1125 09:30:01.016184 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401050-crkvm"] Nov 25 09:30:01 crc kubenswrapper[4687]: I1125 09:30:01.251711 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-crkvm" event={"ID":"6e34247b-6e99-4afd-988f-a22d68fd3858","Type":"ContainerStarted","Data":"7c999de126a90bde5984efae4150c084a479090bc125c8847297b96235c76033"} Nov 25 09:30:02 crc kubenswrapper[4687]: I1125 09:30:02.266731 4687 generic.go:334] "Generic (PLEG): container finished" podID="6e34247b-6e99-4afd-988f-a22d68fd3858" containerID="9a625a576a7b3e429db1c99886f52c13e6ffb43c9f74fd1ba78a9b708e3541b7" exitCode=0 Nov 25 09:30:02 crc kubenswrapper[4687]: I1125 09:30:02.266881 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-crkvm" event={"ID":"6e34247b-6e99-4afd-988f-a22d68fd3858","Type":"ContainerDied","Data":"9a625a576a7b3e429db1c99886f52c13e6ffb43c9f74fd1ba78a9b708e3541b7"} Nov 25 09:30:03 crc kubenswrapper[4687]: I1125 09:30:03.599029 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-crkvm" Nov 25 09:30:03 crc kubenswrapper[4687]: I1125 09:30:03.790479 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-65l26\" (UniqueName: \"kubernetes.io/projected/6e34247b-6e99-4afd-988f-a22d68fd3858-kube-api-access-65l26\") pod \"6e34247b-6e99-4afd-988f-a22d68fd3858\" (UID: \"6e34247b-6e99-4afd-988f-a22d68fd3858\") " Nov 25 09:30:03 crc kubenswrapper[4687]: I1125 09:30:03.790984 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6e34247b-6e99-4afd-988f-a22d68fd3858-config-volume\") pod \"6e34247b-6e99-4afd-988f-a22d68fd3858\" (UID: \"6e34247b-6e99-4afd-988f-a22d68fd3858\") " Nov 25 09:30:03 crc kubenswrapper[4687]: I1125 09:30:03.791081 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6e34247b-6e99-4afd-988f-a22d68fd3858-secret-volume\") pod \"6e34247b-6e99-4afd-988f-a22d68fd3858\" (UID: \"6e34247b-6e99-4afd-988f-a22d68fd3858\") " Nov 25 09:30:03 crc kubenswrapper[4687]: I1125 09:30:03.791652 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e34247b-6e99-4afd-988f-a22d68fd3858-config-volume" (OuterVolumeSpecName: "config-volume") pod "6e34247b-6e99-4afd-988f-a22d68fd3858" (UID: "6e34247b-6e99-4afd-988f-a22d68fd3858"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:30:03 crc kubenswrapper[4687]: I1125 09:30:03.796645 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e34247b-6e99-4afd-988f-a22d68fd3858-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "6e34247b-6e99-4afd-988f-a22d68fd3858" (UID: "6e34247b-6e99-4afd-988f-a22d68fd3858"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:30:03 crc kubenswrapper[4687]: I1125 09:30:03.796644 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e34247b-6e99-4afd-988f-a22d68fd3858-kube-api-access-65l26" (OuterVolumeSpecName: "kube-api-access-65l26") pod "6e34247b-6e99-4afd-988f-a22d68fd3858" (UID: "6e34247b-6e99-4afd-988f-a22d68fd3858"). InnerVolumeSpecName "kube-api-access-65l26". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:30:03 crc kubenswrapper[4687]: I1125 09:30:03.893860 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-65l26\" (UniqueName: \"kubernetes.io/projected/6e34247b-6e99-4afd-988f-a22d68fd3858-kube-api-access-65l26\") on node \"crc\" DevicePath \"\"" Nov 25 09:30:03 crc kubenswrapper[4687]: I1125 09:30:03.894092 4687 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6e34247b-6e99-4afd-988f-a22d68fd3858-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:30:03 crc kubenswrapper[4687]: I1125 09:30:03.894266 4687 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6e34247b-6e99-4afd-988f-a22d68fd3858-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:30:04 crc kubenswrapper[4687]: I1125 09:30:04.295207 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-crkvm" event={"ID":"6e34247b-6e99-4afd-988f-a22d68fd3858","Type":"ContainerDied","Data":"7c999de126a90bde5984efae4150c084a479090bc125c8847297b96235c76033"} Nov 25 09:30:04 crc kubenswrapper[4687]: I1125 09:30:04.295250 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c999de126a90bde5984efae4150c084a479090bc125c8847297b96235c76033" Nov 25 09:30:04 crc kubenswrapper[4687]: I1125 09:30:04.295269 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-crkvm" Nov 25 09:30:23 crc kubenswrapper[4687]: I1125 09:30:23.845082 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:30:23 crc kubenswrapper[4687]: I1125 09:30:23.845672 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:30:53 crc kubenswrapper[4687]: I1125 09:30:53.844816 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:30:53 crc kubenswrapper[4687]: I1125 09:30:53.845408 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:31:23 crc kubenswrapper[4687]: I1125 09:31:23.844883 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:31:23 crc kubenswrapper[4687]: I1125 
09:31:23.845625 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:31:23 crc kubenswrapper[4687]: I1125 09:31:23.845696 4687 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:31:23 crc kubenswrapper[4687]: I1125 09:31:23.846911 4687 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895"} pod="openshift-machine-config-operator/machine-config-daemon-vcqct" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:31:23 crc kubenswrapper[4687]: I1125 09:31:23.847024 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" containerID="cri-o://8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895" gracePeriod=600 Nov 25 09:31:24 crc kubenswrapper[4687]: E1125 09:31:24.042896 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:31:24 crc kubenswrapper[4687]: I1125 09:31:24.081903 4687 generic.go:334] "Generic (PLEG): container finished" podID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895" exitCode=0 Nov 25 09:31:24 crc kubenswrapper[4687]: I1125 09:31:24.081963 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerDied","Data":"8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895"} Nov 25 09:31:24 crc kubenswrapper[4687]: I1125 09:31:24.082002 4687 scope.go:117] "RemoveContainer" containerID="026b07db34bc04118ebe444d3596042b6afeddebbaa71ea4729e8d639abf5885" Nov 25 09:31:24 crc kubenswrapper[4687]: I1125 09:31:24.082871 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895" Nov 25 09:31:24 crc kubenswrapper[4687]: E1125 09:31:24.083416 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:31:37 crc kubenswrapper[4687]: I1125 09:31:37.735402 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895" Nov 25 09:31:37 crc kubenswrapper[4687]: E1125 
09:31:37.736219 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c"
Nov 25 09:31:49 crc kubenswrapper[4687]: I1125 09:31:49.734929 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895"
Nov 25 09:31:49 crc kubenswrapper[4687]: E1125 09:31:49.735725 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c"
Nov 25 09:31:52 crc kubenswrapper[4687]: I1125 09:31:52.051181 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-5efa-account-create-fz9hn"]
Nov 25 09:31:52 crc kubenswrapper[4687]: I1125 09:31:52.061151 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-02d2-account-create-wz57w"]
Nov 25 09:31:52 crc kubenswrapper[4687]: I1125 09:31:52.071188 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-5efa-account-create-fz9hn"]
Nov 25 09:31:52 crc kubenswrapper[4687]: I1125 09:31:52.079436 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-02d2-account-create-wz57w"]
Nov 25 09:31:53 crc kubenswrapper[4687]: I1125 09:31:53.037431 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-rf7rz"]
Nov 25 09:31:53 crc kubenswrapper[4687]: I1125 09:31:53.047513 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-cnhz2"]
Nov 25 09:31:53 crc kubenswrapper[4687]: I1125 09:31:53.057414 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bd1e-account-create-5r2nk"]
Nov 25 09:31:53 crc kubenswrapper[4687]: I1125 09:31:53.066630 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-29bts"]
Nov 25 09:31:53 crc kubenswrapper[4687]: I1125 09:31:53.076433 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bd1e-account-create-5r2nk"]
Nov 25 09:31:53 crc kubenswrapper[4687]: I1125 09:31:53.085387 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-rf7rz"]
Nov 25 09:31:53 crc kubenswrapper[4687]: I1125 09:31:53.094605 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-cnhz2"]
Nov 25 09:31:53 crc kubenswrapper[4687]: I1125 09:31:53.107064 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-29bts"]
Nov 25 09:31:53 crc kubenswrapper[4687]: I1125 09:31:53.749169 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="070421fb-5d0d-4088-bcae-f6d19fdc21fa" path="/var/lib/kubelet/pods/070421fb-5d0d-4088-bcae-f6d19fdc21fa/volumes"
Nov 25 09:31:53 crc kubenswrapper[4687]: I1125 09:31:53.749999 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27fd9468-4e1f-4cc9-9711-6e378f4236ec" path="/var/lib/kubelet/pods/27fd9468-4e1f-4cc9-9711-6e378f4236ec/volumes"
Nov 25 09:31:53 crc kubenswrapper[4687]: I1125 09:31:53.751098 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fb1585c-c72a-44ee-8614-3a84312c01ab" path="/var/lib/kubelet/pods/4fb1585c-c72a-44ee-8614-3a84312c01ab/volumes"
Nov 25 09:31:53 crc kubenswrapper[4687]: I1125 09:31:53.751927 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50" path="/var/lib/kubelet/pods/bc4fe59d-0ab5-4f22-b2f5-7b64514b7e50/volumes"
Nov 25 09:31:53 crc kubenswrapper[4687]: I1125 09:31:53.753889 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3612478-e4e2-49bb-aecc-1eb23c44975b" path="/var/lib/kubelet/pods/d3612478-e4e2-49bb-aecc-1eb23c44975b/volumes"
Nov 25 09:31:53 crc kubenswrapper[4687]: I1125 09:31:53.754935 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2370488-8d98-49cc-acc8-52f5c5c77de7" path="/var/lib/kubelet/pods/f2370488-8d98-49cc-acc8-52f5c5c77de7/volumes"
Nov 25 09:32:00 crc kubenswrapper[4687]: I1125 09:32:00.734734 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895"
Nov 25 09:32:00 crc kubenswrapper[4687]: E1125 09:32:00.735691 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c"
Nov 25 09:32:13 crc kubenswrapper[4687]: I1125 09:32:13.736772 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895"
Nov 25 09:32:13 crc kubenswrapper[4687]: E1125 09:32:13.737671 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c"
Nov 25 09:32:21 crc kubenswrapper[4687]: I1125 09:32:21.046271 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-2jfzx"]
Nov 25 09:32:21 crc kubenswrapper[4687]: I1125 09:32:21.054278 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-2jfzx"]
Nov 25 09:32:21 crc kubenswrapper[4687]: I1125 09:32:21.758969 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c723bbb1-d23f-4a7a-9e52-ba3279dd969b" path="/var/lib/kubelet/pods/c723bbb1-d23f-4a7a-9e52-ba3279dd969b/volumes"
Nov 25 09:32:26 crc kubenswrapper[4687]: I1125 09:32:26.735219 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895"
Nov 25 09:32:26 crc kubenswrapper[4687]: E1125 09:32:26.736021 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c"
Nov 25 09:32:31 crc kubenswrapper[4687]: I1125 09:32:31.033216 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-86dxw"]
Nov 25 09:32:31 crc kubenswrapper[4687]: I1125 09:32:31.046300 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-86dxw"]
Nov 25 09:32:31 crc kubenswrapper[4687]: I1125 09:32:31.061419 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-e3fb-account-create-w7xhv"]
Nov 25 09:32:31 crc kubenswrapper[4687]: I1125 09:32:31.069735 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-twnk8"]
Nov 25 09:32:31 crc kubenswrapper[4687]: I1125 09:32:31.079604 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-4573-account-create-chmkq"]
Nov 25 09:32:31 crc kubenswrapper[4687]: I1125 09:32:31.084374 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-e3fb-account-create-w7xhv"]
Nov 25 09:32:31 crc kubenswrapper[4687]: I1125 09:32:31.091626 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-twnk8"]
Nov 25 09:32:31 crc kubenswrapper[4687]: I1125 09:32:31.098334 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-4573-account-create-chmkq"]
Nov 25 09:32:31 crc kubenswrapper[4687]: I1125 09:32:31.105104 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-gtv52"]
Nov 25 09:32:31 crc kubenswrapper[4687]: I1125 09:32:31.112154 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-gtv52"]
Nov 25 09:32:31 crc kubenswrapper[4687]: I1125 09:32:31.119583 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-de73-account-create-p9ft5"]
Nov 25 09:32:31 crc kubenswrapper[4687]: I1125 09:32:31.126286 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-de73-account-create-p9ft5"]
Nov 25 09:32:31 crc kubenswrapper[4687]: I1125 09:32:31.709208 4687 generic.go:334] "Generic (PLEG): container finished" podID="34a409ae-58d8-4746-83e8-f93d0e449216" containerID="88cacfc66364164d5a82446240b654cbcb8009c01462a8de2db4de61c41096ed" exitCode=0
Nov 25 09:32:31 crc kubenswrapper[4687]: I1125 09:32:31.709249 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg" event={"ID":"34a409ae-58d8-4746-83e8-f93d0e449216","Type":"ContainerDied","Data":"88cacfc66364164d5a82446240b654cbcb8009c01462a8de2db4de61c41096ed"}
Nov 25 09:32:31 crc kubenswrapper[4687]: I1125 09:32:31.747781 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10b1fc74-8906-4881-a45c-e812501c6f2f" path="/var/lib/kubelet/pods/10b1fc74-8906-4881-a45c-e812501c6f2f/volumes"
Nov 25 09:32:31 crc kubenswrapper[4687]: I1125 09:32:31.749017 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf" path="/var/lib/kubelet/pods/1fc29bb0-5f3c-4b4e-9537-21ffc8179bcf/volumes"
Nov 25 09:32:31 crc kubenswrapper[4687]: I1125 09:32:31.750269 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57674a17-12fc-4749-bcc9-19a92f1c5016" path="/var/lib/kubelet/pods/57674a17-12fc-4749-bcc9-19a92f1c5016/volumes"
Nov 25 09:32:31 crc kubenswrapper[4687]: I1125 09:32:31.751366 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="920e2328-777b-4ee4-8bda-f5dc74435740" path="/var/lib/kubelet/pods/920e2328-777b-4ee4-8bda-f5dc74435740/volumes"
Nov 25 09:32:31 crc kubenswrapper[4687]: I1125 09:32:31.753824 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="95391d75-b60d-49a7-892d-4236d940363c" path="/var/lib/kubelet/pods/95391d75-b60d-49a7-892d-4236d940363c/volumes"
Nov 25 09:32:31 crc kubenswrapper[4687]: I1125 09:32:31.754855 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c" path="/var/lib/kubelet/pods/ebc70f96-0dd6-4500-a16f-3d4b0ead0f8c/volumes"
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.113766 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg"
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.183804 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f69cm\" (UniqueName: \"kubernetes.io/projected/34a409ae-58d8-4746-83e8-f93d0e449216-kube-api-access-f69cm\") pod \"34a409ae-58d8-4746-83e8-f93d0e449216\" (UID: \"34a409ae-58d8-4746-83e8-f93d0e449216\") "
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.183888 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/34a409ae-58d8-4746-83e8-f93d0e449216-ssh-key\") pod \"34a409ae-58d8-4746-83e8-f93d0e449216\" (UID: \"34a409ae-58d8-4746-83e8-f93d0e449216\") "
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.183966 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34a409ae-58d8-4746-83e8-f93d0e449216-bootstrap-combined-ca-bundle\") pod \"34a409ae-58d8-4746-83e8-f93d0e449216\" (UID: \"34a409ae-58d8-4746-83e8-f93d0e449216\") "
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.184007 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/34a409ae-58d8-4746-83e8-f93d0e449216-inventory\") pod \"34a409ae-58d8-4746-83e8-f93d0e449216\" (UID: \"34a409ae-58d8-4746-83e8-f93d0e449216\") "
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.190795 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34a409ae-58d8-4746-83e8-f93d0e449216-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "34a409ae-58d8-4746-83e8-f93d0e449216" (UID: "34a409ae-58d8-4746-83e8-f93d0e449216"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.190830 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34a409ae-58d8-4746-83e8-f93d0e449216-kube-api-access-f69cm" (OuterVolumeSpecName: "kube-api-access-f69cm") pod "34a409ae-58d8-4746-83e8-f93d0e449216" (UID: "34a409ae-58d8-4746-83e8-f93d0e449216"). InnerVolumeSpecName "kube-api-access-f69cm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.211693 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34a409ae-58d8-4746-83e8-f93d0e449216-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "34a409ae-58d8-4746-83e8-f93d0e449216" (UID: "34a409ae-58d8-4746-83e8-f93d0e449216"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.213174 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34a409ae-58d8-4746-83e8-f93d0e449216-inventory" (OuterVolumeSpecName: "inventory") pod "34a409ae-58d8-4746-83e8-f93d0e449216" (UID: "34a409ae-58d8-4746-83e8-f93d0e449216"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.286468 4687 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34a409ae-58d8-4746-83e8-f93d0e449216-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.286604 4687 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/34a409ae-58d8-4746-83e8-f93d0e449216-inventory\") on node \"crc\" DevicePath \"\""
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.286617 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f69cm\" (UniqueName: \"kubernetes.io/projected/34a409ae-58d8-4746-83e8-f93d0e449216-kube-api-access-f69cm\") on node \"crc\" DevicePath \"\""
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.286630 4687 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/34a409ae-58d8-4746-83e8-f93d0e449216-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.789458 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg" event={"ID":"34a409ae-58d8-4746-83e8-f93d0e449216","Type":"ContainerDied","Data":"978b10c4558d81c08034cef7fd33c7661e8ddd0fa292dcb8c83e0455153c0808"}
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.789533 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="978b10c4558d81c08034cef7fd33c7661e8ddd0fa292dcb8c83e0455153c0808"
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.789626 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg"
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.833766 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-z6m94"]
Nov 25 09:32:33 crc kubenswrapper[4687]: E1125 09:32:33.834690 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34a409ae-58d8-4746-83e8-f93d0e449216" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.834819 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="34a409ae-58d8-4746-83e8-f93d0e449216" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 25 09:32:33 crc kubenswrapper[4687]: E1125 09:32:33.834949 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e34247b-6e99-4afd-988f-a22d68fd3858" containerName="collect-profiles"
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.835045 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e34247b-6e99-4afd-988f-a22d68fd3858" containerName="collect-profiles"
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.835464 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="34a409ae-58d8-4746-83e8-f93d0e449216" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.835665 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e34247b-6e99-4afd-988f-a22d68fd3858" containerName="collect-profiles"
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.836904 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-z6m94"
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.844478 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-44nct"
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.844764 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.844903 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.845011 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.847084 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-z6m94"]
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.906847 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lxkk5\" (UniqueName: \"kubernetes.io/projected/c2600c05-1335-45a5-b7d3-4bfd661d7884-kube-api-access-lxkk5\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-z6m94\" (UID: \"c2600c05-1335-45a5-b7d3-4bfd661d7884\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-z6m94"
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.906917 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2600c05-1335-45a5-b7d3-4bfd661d7884-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-z6m94\" (UID: \"c2600c05-1335-45a5-b7d3-4bfd661d7884\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-z6m94"
Nov 25 09:32:33 crc kubenswrapper[4687]: I1125 09:32:33.907038 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c2600c05-1335-45a5-b7d3-4bfd661d7884-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-z6m94\" (UID: \"c2600c05-1335-45a5-b7d3-4bfd661d7884\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-z6m94"
Nov 25 09:32:34 crc kubenswrapper[4687]: I1125 09:32:34.008986 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c2600c05-1335-45a5-b7d3-4bfd661d7884-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-z6m94\" (UID: \"c2600c05-1335-45a5-b7d3-4bfd661d7884\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-z6m94"
Nov 25 09:32:34 crc kubenswrapper[4687]: I1125 09:32:34.009256 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lxkk5\" (UniqueName: \"kubernetes.io/projected/c2600c05-1335-45a5-b7d3-4bfd661d7884-kube-api-access-lxkk5\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-z6m94\" (UID: \"c2600c05-1335-45a5-b7d3-4bfd661d7884\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-z6m94"
Nov 25 09:32:34 crc kubenswrapper[4687]: I1125 09:32:34.009351 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2600c05-1335-45a5-b7d3-4bfd661d7884-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-z6m94\" (UID: \"c2600c05-1335-45a5-b7d3-4bfd661d7884\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-z6m94"
Nov 25 09:32:34 crc kubenswrapper[4687]: I1125 09:32:34.026837 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c2600c05-1335-45a5-b7d3-4bfd661d7884-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-z6m94\" (UID: \"c2600c05-1335-45a5-b7d3-4bfd661d7884\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-z6m94"
Nov 25 09:32:34 crc kubenswrapper[4687]: I1125 09:32:34.026922 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2600c05-1335-45a5-b7d3-4bfd661d7884-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-z6m94\" (UID: \"c2600c05-1335-45a5-b7d3-4bfd661d7884\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-z6m94"
Nov 25 09:32:34 crc kubenswrapper[4687]: I1125 09:32:34.032377 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lxkk5\" (UniqueName: \"kubernetes.io/projected/c2600c05-1335-45a5-b7d3-4bfd661d7884-kube-api-access-lxkk5\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-z6m94\" (UID: \"c2600c05-1335-45a5-b7d3-4bfd661d7884\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-z6m94"
Nov 25 09:32:34 crc kubenswrapper[4687]: I1125 09:32:34.163398 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-z6m94"
Nov 25 09:32:34 crc kubenswrapper[4687]: I1125 09:32:34.698430 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-z6m94"]
Nov 25 09:32:34 crc kubenswrapper[4687]: I1125 09:32:34.708406 4687 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 09:32:34 crc kubenswrapper[4687]: I1125 09:32:34.799330 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-z6m94" event={"ID":"c2600c05-1335-45a5-b7d3-4bfd661d7884","Type":"ContainerStarted","Data":"696f5d3dab2c2d918b9c7e1a6b7f8d23ecf88c34743d0dd09c22ef0cc4b99627"}
Nov 25 09:32:35 crc kubenswrapper[4687]: I1125 09:32:35.612426 4687 scope.go:117] "RemoveContainer" containerID="7270282671fe1dcb14bc4acada6c83d2dde70fd11596965d46b046a4e4e7b1b8"
Nov 25 09:32:35 crc kubenswrapper[4687]: I1125 09:32:35.749987 4687 scope.go:117] "RemoveContainer" containerID="dc9be05bf5bd9171d7d16c604ca30f3d04ab51da4ab237d2c71eb9e331dfe63d"
Nov 25 09:32:35 crc kubenswrapper[4687]: I1125 09:32:35.913110 4687 scope.go:117] "RemoveContainer" containerID="5625287b135b93d6bcf52e4fa8cd16090ebdf51ec368497b1d478502f2036f0b"
Nov 25 09:32:36 crc kubenswrapper[4687]: I1125 09:32:36.280496 4687 scope.go:117] "RemoveContainer" containerID="dbb65b8c4108d0bfc32e690df97534d5d0034a3ac627fb87d83f481d1e4f1ac6"
Nov 25 09:32:36 crc kubenswrapper[4687]: I1125 09:32:36.315492 4687 scope.go:117] "RemoveContainer" containerID="36b614d18c67fd8b04aa78ad48600ee75c2fbbda3aa302f30b939308daca66a8"
Nov 25 09:32:36 crc kubenswrapper[4687]: I1125 09:32:36.434001 4687 scope.go:117] "RemoveContainer" containerID="3a669b3bdbbfd9112d5f4ac21b84ed14248341d3c43fb8c11364760c13fc25da"
Nov 25 09:32:36 crc kubenswrapper[4687]: I1125 09:32:36.465522 4687 scope.go:117] "RemoveContainer" containerID="ff8f7d54639c140489b1e10a5729e8b959822917f0b9761b9848307e7c07f584"
Nov 25 09:32:36 crc kubenswrapper[4687]: I1125 09:32:36.493814 4687 scope.go:117] "RemoveContainer" containerID="0bc179e62474513e3384bc30bb76a711049084865fff62813e5592e387828d3a"
Nov 25 09:32:36 crc kubenswrapper[4687]: I1125 09:32:36.526761 4687 scope.go:117] "RemoveContainer" containerID="77036674894a1e19533a43aaaf9c024bf19dc8d581ee94d5f53ec3a9c2fcf466"
Nov 25 09:32:36 crc kubenswrapper[4687]: I1125 09:32:36.547269 4687 scope.go:117] "RemoveContainer" containerID="cb1ee2042b781d7c9c11978d4b9103b12555c090fc9b4ee25daefca35d2e42d8"
Nov 25 09:32:36 crc kubenswrapper[4687]: I1125 09:32:36.569599 4687 scope.go:117] "RemoveContainer" containerID="6c51f725c9a9061eea651499f0e763af58356c240ea7d27336cc2c01cb025de2"
Nov 25 09:32:36 crc kubenswrapper[4687]: I1125 09:32:36.592013 4687 scope.go:117] "RemoveContainer" containerID="d1e7ed6f76d1852f73fe9a4de87d76c3f9ca66778fd1d386023a31c1b5112e2b"
Nov 25 09:32:36 crc kubenswrapper[4687]: I1125 09:32:36.617892 4687 scope.go:117] "RemoveContainer" containerID="c7ccf1d84702ff8e9d7c8a779246903c8ed9393e0e9556c3469fcce052c98806"
Nov 25 09:32:36 crc kubenswrapper[4687]: I1125 09:32:36.835562 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-z6m94" event={"ID":"c2600c05-1335-45a5-b7d3-4bfd661d7884","Type":"ContainerStarted","Data":"5d67c2c086a0b1bd3cd606c20ab547a69f12577cdebd1741ba0f3fc835f58aa8"}
Nov 25 09:32:37 crc kubenswrapper[4687]: I1125 09:32:37.040420 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-pstdr"]
Nov 25 09:32:37 crc kubenswrapper[4687]: I1125 09:32:37.055160 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-pstdr"]
Nov 25 09:32:37 crc kubenswrapper[4687]: I1125 09:32:37.744717 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66108854-2b2f-49c8-be7a-b80e336dc27b" path="/var/lib/kubelet/pods/66108854-2b2f-49c8-be7a-b80e336dc27b/volumes"
Nov 25 09:32:37 crc kubenswrapper[4687]: I1125 09:32:37.868250 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-z6m94" podStartSLOduration=3.295446893 podStartE2EDuration="4.868230135s" podCreationTimestamp="2025-11-25 09:32:33 +0000 UTC" firstStartedPulling="2025-11-25 09:32:34.707818851 +0000 UTC m=+1749.761458609" lastFinishedPulling="2025-11-25 09:32:36.280602093 +0000 UTC m=+1751.334241851" observedRunningTime="2025-11-25 09:32:37.864821003 +0000 UTC m=+1752.918460741" watchObservedRunningTime="2025-11-25 09:32:37.868230135 +0000 UTC m=+1752.921869863"
Nov 25 09:32:39 crc kubenswrapper[4687]: I1125 09:32:39.735891 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895"
Nov 25 09:32:39 crc kubenswrapper[4687]: E1125 09:32:39.736679 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c"
Nov 25 09:32:52 crc kubenswrapper[4687]: I1125 09:32:52.735367 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895"
Nov 25 09:32:52 crc kubenswrapper[4687]: E1125 09:32:52.736233 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c"
Nov 25 09:33:04 crc kubenswrapper[4687]: I1125 09:33:04.735136 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895"
Nov 25 09:33:04 crc kubenswrapper[4687]: E1125 09:33:04.736050 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c"
Nov 25 09:33:19 crc kubenswrapper[4687]: I1125 09:33:19.736200 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895"
Nov 25 09:33:19 crc kubenswrapper[4687]: E1125 09:33:19.737197 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c"
Nov 25 09:33:26 crc kubenswrapper[4687]: I1125 09:33:26.043825 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-765t4"]
Nov 25 09:33:26 crc kubenswrapper[4687]: I1125 09:33:26.051138 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-765t4"]
Nov 25 09:33:27 crc kubenswrapper[4687]: I1125 09:33:27.029214 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-gpc68"]
Nov 25 09:33:27 crc kubenswrapper[4687]: I1125 09:33:27.036982 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-lvpd8"]
Nov 25 09:33:27 crc kubenswrapper[4687]: I1125 09:33:27.045188 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-gpc68"]
Nov 25 09:33:27 crc kubenswrapper[4687]: I1125 09:33:27.054480 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-lvpd8"]
Nov 25 09:33:27 crc kubenswrapper[4687]: I1125 09:33:27.745018 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb5f689-fd43-4fa3-b5a9-6603155ff184" path="/var/lib/kubelet/pods/7bb5f689-fd43-4fa3-b5a9-6603155ff184/volumes"
Nov 25 09:33:27 crc kubenswrapper[4687]: I1125 09:33:27.746088 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c2bb808-f45c-4126-94b9-36187402c9d7" path="/var/lib/kubelet/pods/7c2bb808-f45c-4126-94b9-36187402c9d7/volumes"
Nov 25 09:33:27 crc kubenswrapper[4687]: I1125 09:33:27.746678 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8" path="/var/lib/kubelet/pods/baa66a5b-e68d-4ebe-b2ac-99dbf4cf6aa8/volumes"
Nov 25 09:33:34 crc kubenswrapper[4687]: I1125 09:33:34.735594 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895"
Nov 25 09:33:34 crc kubenswrapper[4687]: E1125 09:33:34.736356 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c"
Nov 25 09:33:36 crc kubenswrapper[4687]: I1125 09:33:36.979759 4687 scope.go:117] "RemoveContainer" containerID="8dc9ece059db60550aa80335a9b974c0c67756ac0fb2b73973e878346866870b"
Nov 25 09:33:37 crc kubenswrapper[4687]: I1125 09:33:37.015630 4687 scope.go:117] "RemoveContainer" containerID="cb2dd5ea0c36e152c7d609cbb53dc52ee7945b0dcff8874679650e5b3a9342c0"
Nov 25 09:33:37 crc kubenswrapper[4687]: I1125 09:33:37.077278 4687 scope.go:117] "RemoveContainer" containerID="6f4f84f88751424025dafc2832f22dc1a206b03fc6b76ff91fdac97f54082182"
Nov 25 09:33:37 crc kubenswrapper[4687]: I1125 09:33:37.112171 4687 scope.go:117] "RemoveContainer" containerID="1146d2e7a146cc0e8fcc48e0a7b1f5e635536b39096eff2f449140de8e3fe0f4"
Nov 25 09:33:39 crc kubenswrapper[4687]: I1125 09:33:39.036769 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-8b7k2"]
Nov 25 09:33:39 crc kubenswrapper[4687]: I1125 09:33:39.047750 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-8b7k2"]
Nov 25 09:33:39 crc kubenswrapper[4687]: I1125 09:33:39.746157 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d974fcb-fbad-4f24-9857-a791205029a0" path="/var/lib/kubelet/pods/5d974fcb-fbad-4f24-9857-a791205029a0/volumes"
Nov 25 09:33:41 crc kubenswrapper[4687]: I1125 09:33:41.054398 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-2h86j"]
Nov 25 09:33:41 crc kubenswrapper[4687]: I1125 09:33:41.064217 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-2h86j"]
Nov 25 09:33:41 crc kubenswrapper[4687]: I1125 09:33:41.745395 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53681654-97b6-4586-ba53-8b6b018e04fa" path="/var/lib/kubelet/pods/53681654-97b6-4586-ba53-8b6b018e04fa/volumes"
Nov 25 09:33:45 crc kubenswrapper[4687]: I1125 09:33:45.743064 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895"
Nov 25 09:33:45 crc kubenswrapper[4687]: E1125 09:33:45.744021 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c"
Nov 25 09:33:59 crc kubenswrapper[4687]: I1125 09:33:59.736258 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895"
Nov 25 09:33:59 crc kubenswrapper[4687]: E1125 09:33:59.737428 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c"
Nov 25 09:34:10 crc kubenswrapper[4687]: I1125 09:34:10.735300 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895"
Nov 25 09:34:10 crc kubenswrapper[4687]: E1125 09:34:10.736753 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c"
Nov 25 09:34:23 crc kubenswrapper[4687]: I1125 09:34:23.051614 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-nnzf8"]
Nov 25 09:34:23 crc kubenswrapper[4687]: I1125 09:34:23.060863 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-nnzf8"]
Nov 25 09:34:23 crc kubenswrapper[4687]: I1125 09:34:23.747070 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f850ec56-21a5-4782-8aa3-2a1f53ddadb2" path="/var/lib/kubelet/pods/f850ec56-21a5-4782-8aa3-2a1f53ddadb2/volumes"
Nov 25 09:34:24 crc kubenswrapper[4687]: I1125 09:34:24.034849 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-ll6h7"]
Nov 25 09:34:24 crc kubenswrapper[4687]: I1125 09:34:24.045038 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-991b-account-create-kdxhh"]
Nov 25 09:34:24 crc kubenswrapper[4687]: I1125 09:34:24.054869 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-6bad-account-create-2pbld"]
Nov 25 09:34:24 crc kubenswrapper[4687]: I1125 09:34:24.065254 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-e925-account-create-wkg4s"]
Nov 25 09:34:24 crc kubenswrapper[4687]: I1125 09:34:24.074198 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-4ndrh"]
Nov 25 09:34:24 crc kubenswrapper[4687]: I1125 09:34:24.081649 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-ll6h7"]
Nov 25 09:34:24 crc kubenswrapper[4687]: I1125 09:34:24.090769 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-4ndrh"]
Nov 25 09:34:24 crc kubenswrapper[4687]: I1125 09:34:24.100595 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-6bad-account-create-2pbld"]
Nov 25 09:34:24 crc kubenswrapper[4687]: I1125 09:34:24.107751 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-991b-account-create-kdxhh"]
Nov 25 09:34:24 crc kubenswrapper[4687]: I1125 09:34:24.115931 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-e925-account-create-wkg4s"]
Nov 25 09:34:25 crc kubenswrapper[4687]: I1125 09:34:25.742335 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895"
Nov 25 09:34:25 crc kubenswrapper[4687]: E1125 09:34:25.743736 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c"
Nov 25 09:34:25 crc kubenswrapper[4687]: I1125 09:34:25.746191 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1db82525-47aa-465b-b638-0d954b02e9b2" path="/var/lib/kubelet/pods/1db82525-47aa-465b-b638-0d954b02e9b2/volumes"
Nov 25 09:34:25 crc kubenswrapper[4687]: I1125 09:34:25.746850 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d9195a1-3aeb-466d-970a-8abd134135c8" path="/var/lib/kubelet/pods/3d9195a1-3aeb-466d-970a-8abd134135c8/volumes"
Nov 25 09:34:25 crc kubenswrapper[4687]: I1125 09:34:25.747419 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4261462d-4b8e-4739-b79f-882c109aa8be" path="/var/lib/kubelet/pods/4261462d-4b8e-4739-b79f-882c109aa8be/volumes"
Nov 25 09:34:25 crc kubenswrapper[4687]: I1125 09:34:25.748030 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c17f596-473c-49f6-b2f2-56ee94f47c1b" path="/var/lib/kubelet/pods/8c17f596-473c-49f6-b2f2-56ee94f47c1b/volumes"
Nov 25 09:34:25 crc kubenswrapper[4687]: I1125 09:34:25.749194 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a3668763-e276-4dc6-bacc-3854d2a49983" path="/var/lib/kubelet/pods/a3668763-e276-4dc6-bacc-3854d2a49983/volumes"
Nov 25 09:34:33 crc kubenswrapper[4687]: I1125 09:34:33.883990 4687 generic.go:334] "Generic (PLEG): container finished" podID="c2600c05-1335-45a5-b7d3-4bfd661d7884" containerID="5d67c2c086a0b1bd3cd606c20ab547a69f12577cdebd1741ba0f3fc835f58aa8" exitCode=0
Nov 25 09:34:33 crc kubenswrapper[4687]: I1125 09:34:33.884072 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-z6m94" event={"ID":"c2600c05-1335-45a5-b7d3-4bfd661d7884","Type":"ContainerDied","Data":"5d67c2c086a0b1bd3cd606c20ab547a69f12577cdebd1741ba0f3fc835f58aa8"}
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.324482 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-z6m94"
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.468653 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2600c05-1335-45a5-b7d3-4bfd661d7884-inventory\") pod \"c2600c05-1335-45a5-b7d3-4bfd661d7884\" (UID: \"c2600c05-1335-45a5-b7d3-4bfd661d7884\") "
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.468745 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c2600c05-1335-45a5-b7d3-4bfd661d7884-ssh-key\") pod \"c2600c05-1335-45a5-b7d3-4bfd661d7884\" (UID: \"c2600c05-1335-45a5-b7d3-4bfd661d7884\") "
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.468805 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lxkk5\" (UniqueName: \"kubernetes.io/projected/c2600c05-1335-45a5-b7d3-4bfd661d7884-kube-api-access-lxkk5\") pod \"c2600c05-1335-45a5-b7d3-4bfd661d7884\" (UID: \"c2600c05-1335-45a5-b7d3-4bfd661d7884\") "
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.478739 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2600c05-1335-45a5-b7d3-4bfd661d7884-kube-api-access-lxkk5" (OuterVolumeSpecName: "kube-api-access-lxkk5") pod "c2600c05-1335-45a5-b7d3-4bfd661d7884" (UID: "c2600c05-1335-45a5-b7d3-4bfd661d7884"). InnerVolumeSpecName "kube-api-access-lxkk5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.496930 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2600c05-1335-45a5-b7d3-4bfd661d7884-inventory" (OuterVolumeSpecName: "inventory") pod "c2600c05-1335-45a5-b7d3-4bfd661d7884" (UID: "c2600c05-1335-45a5-b7d3-4bfd661d7884"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.503072 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2600c05-1335-45a5-b7d3-4bfd661d7884-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c2600c05-1335-45a5-b7d3-4bfd661d7884" (UID: "c2600c05-1335-45a5-b7d3-4bfd661d7884"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.571232 4687 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2600c05-1335-45a5-b7d3-4bfd661d7884-inventory\") on node \"crc\" DevicePath \"\""
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.571260 4687 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c2600c05-1335-45a5-b7d3-4bfd661d7884-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.571270 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lxkk5\" (UniqueName: \"kubernetes.io/projected/c2600c05-1335-45a5-b7d3-4bfd661d7884-kube-api-access-lxkk5\") on node \"crc\" DevicePath \"\""
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.904980 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-z6m94" event={"ID":"c2600c05-1335-45a5-b7d3-4bfd661d7884","Type":"ContainerDied","Data":"696f5d3dab2c2d918b9c7e1a6b7f8d23ecf88c34743d0dd09c22ef0cc4b99627"}
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.905345 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="696f5d3dab2c2d918b9c7e1a6b7f8d23ecf88c34743d0dd09c22ef0cc4b99627"
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.905103 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-z6m94"
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.980676 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr"]
Nov 25 09:34:35 crc kubenswrapper[4687]: E1125 09:34:35.981151 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2600c05-1335-45a5-b7d3-4bfd661d7884" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.981176 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2600c05-1335-45a5-b7d3-4bfd661d7884" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.981410 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2600c05-1335-45a5-b7d3-4bfd661d7884" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.982151 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr"
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.984162 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-44nct"
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.984842 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.985462 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.990084 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 09:34:35 crc kubenswrapper[4687]: I1125 09:34:35.991040 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr"]
Nov 25 09:34:36 crc kubenswrapper[4687]: I1125 09:34:36.078741 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr\" (UID: \"d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr"
Nov 25 09:34:36 crc kubenswrapper[4687]: I1125 09:34:36.078795 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5xnv\" (UniqueName: \"kubernetes.io/projected/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc-kube-api-access-s5xnv\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr\" (UID: \"d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr"
Nov 25 09:34:36 crc kubenswrapper[4687]: I1125 09:34:36.078825 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr\" (UID: \"d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr"
Nov 25 09:34:36 crc kubenswrapper[4687]: I1125 09:34:36.180854 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr\" (UID: \"d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr"
Nov 25 09:34:36 crc kubenswrapper[4687]: I1125 09:34:36.180937 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5xnv\" (UniqueName: \"kubernetes.io/projected/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc-kube-api-access-s5xnv\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr\" (UID: \"d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr"
Nov 25 09:34:36 crc kubenswrapper[4687]: I1125 09:34:36.180980 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr\" (UID: \"d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr"
Nov 25 09:34:36 crc kubenswrapper[4687]: I1125 09:34:36.185703 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr\" (UID: \"d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr"
Nov 25 09:34:36 crc kubenswrapper[4687]: I1125 09:34:36.191094 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr\" (UID: \"d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr"
Nov 25 09:34:36 crc kubenswrapper[4687]: I1125 09:34:36.210264 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5xnv\" (UniqueName: \"kubernetes.io/projected/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc-kube-api-access-s5xnv\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr\" (UID: \"d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr"
Nov 25 09:34:36 crc kubenswrapper[4687]: I1125 09:34:36.301955 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr"
Nov 25 09:34:36 crc kubenswrapper[4687]: I1125 09:34:36.599093 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr"]
Nov 25 09:34:36 crc kubenswrapper[4687]: I1125 09:34:36.920141 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr" event={"ID":"d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc","Type":"ContainerStarted","Data":"c86251a95e98dd0aebb226e8926ad61b6a51920f3078bd75ab38188f7c54a995"}
Nov 25 09:34:37 crc kubenswrapper[4687]: I1125 09:34:37.239463 4687 scope.go:117] "RemoveContainer" containerID="2d95512df8c52f6c6b354f331404d397597558cbd227487b04c0a69a3760675b"
Nov 25 09:34:37 crc kubenswrapper[4687]: I1125 09:34:37.262741 4687 scope.go:117] "RemoveContainer" containerID="d5f98ca1adf1eff43472d45aa9dc38757306f456cf2c04649185d29a0443ccb1"
Nov 25 09:34:37 crc kubenswrapper[4687]: I1125 09:34:37.320173 4687 scope.go:117] "RemoveContainer" containerID="7fbad29cb79250ca8b9ba033fa65403924ed6198ade23d2a20f9e346392b954b"
Nov 25 09:34:37 crc kubenswrapper[4687]: I1125 09:34:37.355367 4687 scope.go:117] "RemoveContainer" containerID="63d830397315354ba571b8773524134aca212307a3a1921cfdf3cb8b70218a22"
Nov 25 09:34:37 crc kubenswrapper[4687]: I1125 09:34:37.395614 4687 scope.go:117] "RemoveContainer" containerID="120c8e9dd480db83c22337cce02f25f60cda56a443c5e55303820d8ae6e84ea3"
Nov 25 09:34:37 crc kubenswrapper[4687]: I1125 09:34:37.462532 4687 scope.go:117] "RemoveContainer" containerID="ed6f2a783e4cc6b5d2e8cb7d015481b22894333f8f1cbf4cc5a1a94e4bcfc3ec"
Nov 25 09:34:37 crc kubenswrapper[4687]: I1125 09:34:37.503189 4687 scope.go:117] "RemoveContainer" containerID="213bf437309e42cd8989c538e3655f49beee4fefe8c494e9af5b52e4c54d4244"
Nov 25 09:34:37 crc kubenswrapper[4687]: I1125 09:34:37.522607 4687 scope.go:117] "RemoveContainer" containerID="24477c3d7537d4689d99b2da56ecda859836de0fecd9930e4fefb8d153aff3f5"
Nov 25 09:34:37 crc kubenswrapper[4687]: I1125 09:34:37.735290 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895"
Nov 25 09:34:37 crc kubenswrapper[4687]: E1125 09:34:37.735793 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c"
Nov 25 09:34:39 crc kubenswrapper[4687]: I1125 09:34:39.948907 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr" event={"ID":"d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc","Type":"ContainerStarted","Data":"e5537125eb78c9741a2ec49a77dd6623c1b7fe0e08c63b7cb5571bb67131daf4"}
Nov 25 09:34:39 crc kubenswrapper[4687]: I1125 09:34:39.980640 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr" podStartSLOduration=2.942047387 podStartE2EDuration="4.980615566s" podCreationTimestamp="2025-11-25 09:34:35 +0000 UTC" firstStartedPulling="2025-11-25 09:34:36.606418065 +0000 UTC m=+1871.660057803" lastFinishedPulling="2025-11-25 09:34:38.644986264 +0000 UTC m=+1873.698625982" observedRunningTime="2025-11-25 09:34:39.965474065 +0000 UTC m=+1875.019113783" watchObservedRunningTime="2025-11-25 09:34:39.980615566 +0000 UTC m=+1875.034255294"
Nov 25 09:34:48 crc kubenswrapper[4687]: I1125 09:34:48.735145 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895"
Nov 25 09:34:48 crc kubenswrapper[4687]: E1125 09:34:48.735932 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c"
Nov 25 09:34:51 crc kubenswrapper[4687]: I1125 09:34:51.052132 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-wm5bb"]
Nov 25 09:34:51 crc kubenswrapper[4687]: I1125 09:34:51.061755 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-wm5bb"]
Nov 25 09:34:51 crc kubenswrapper[4687]: I1125 09:34:51.744986 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79c7ec90-6869-4eed-9ced-6ba0adfe7965" path="/var/lib/kubelet/pods/79c7ec90-6869-4eed-9ced-6ba0adfe7965/volumes"
Nov 25 09:35:00 crc kubenswrapper[4687]: I1125 09:35:00.735306 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895"
Nov 25 09:35:00 crc kubenswrapper[4687]: E1125 09:35:00.736331 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c"
Nov 25 09:35:13 crc kubenswrapper[4687]: I1125 09:35:13.049448 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-fp7dc"]
Nov 25 09:35:13 crc kubenswrapper[4687]: I1125 09:35:13.057226 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-fp7dc"]
Nov 25 09:35:13 crc kubenswrapper[4687]: I1125 09:35:13.746379 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ced595e-30dd-4300-b8b6-df549003f298" path="/var/lib/kubelet/pods/7ced595e-30dd-4300-b8b6-df549003f298/volumes"
Nov 25 09:35:14 crc kubenswrapper[4687]: I1125 09:35:14.734875 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895"
Nov 25 09:35:14 crc kubenswrapper[4687]: E1125 09:35:14.735241 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c"
Nov 25 09:35:18 crc kubenswrapper[4687]: I1125 09:35:18.038359 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-6nqsr"]
Nov 25 09:35:18 crc kubenswrapper[4687]: I1125 09:35:18.051237 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-6nqsr"]
Nov 25 09:35:19 crc kubenswrapper[4687]: I1125 09:35:19.746600 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08ecdbfb-3483-4dc7-83a1-a9dd7b03126a" path="/var/lib/kubelet/pods/08ecdbfb-3483-4dc7-83a1-a9dd7b03126a/volumes"
Nov 25 09:35:22 crc kubenswrapper[4687]: I1125 09:35:22.195415 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-847fc"]
Nov 25 09:35:22 crc kubenswrapper[4687]: I1125 09:35:22.199242 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-847fc"
Nov 25 09:35:22 crc kubenswrapper[4687]: I1125 09:35:22.217393 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-847fc"]
Nov 25 09:35:22 crc kubenswrapper[4687]: I1125 09:35:22.261318 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/004f0ec8-0932-44a0-aa74-985e60adebef-catalog-content\") pod \"certified-operators-847fc\" (UID: \"004f0ec8-0932-44a0-aa74-985e60adebef\") " pod="openshift-marketplace/certified-operators-847fc"
Nov 25 09:35:22 crc kubenswrapper[4687]: I1125 09:35:22.261699 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/004f0ec8-0932-44a0-aa74-985e60adebef-utilities\") pod \"certified-operators-847fc\" (UID: \"004f0ec8-0932-44a0-aa74-985e60adebef\") " pod="openshift-marketplace/certified-operators-847fc"
Nov 25 09:35:22 crc kubenswrapper[4687]: I1125 09:35:22.261819 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h845r\" (UniqueName: \"kubernetes.io/projected/004f0ec8-0932-44a0-aa74-985e60adebef-kube-api-access-h845r\") pod \"certified-operators-847fc\" (UID: \"004f0ec8-0932-44a0-aa74-985e60adebef\") " pod="openshift-marketplace/certified-operators-847fc"
Nov 25 09:35:22 crc kubenswrapper[4687]: I1125 09:35:22.364273 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/004f0ec8-0932-44a0-aa74-985e60adebef-catalog-content\") pod \"certified-operators-847fc\" (UID: \"004f0ec8-0932-44a0-aa74-985e60adebef\") " pod="openshift-marketplace/certified-operators-847fc"
Nov 25 09:35:22 crc kubenswrapper[4687]: I1125 09:35:22.364731 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/004f0ec8-0932-44a0-aa74-985e60adebef-utilities\") pod \"certified-operators-847fc\" (UID: \"004f0ec8-0932-44a0-aa74-985e60adebef\") " pod="openshift-marketplace/certified-operators-847fc"
Nov 25 09:35:22 crc kubenswrapper[4687]: I1125 09:35:22.364767 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h845r\" (UniqueName: \"kubernetes.io/projected/004f0ec8-0932-44a0-aa74-985e60adebef-kube-api-access-h845r\") pod \"certified-operators-847fc\" (UID: \"004f0ec8-0932-44a0-aa74-985e60adebef\") " pod="openshift-marketplace/certified-operators-847fc"
Nov 25 09:35:22 crc kubenswrapper[4687]: I1125 09:35:22.365073 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/004f0ec8-0932-44a0-aa74-985e60adebef-catalog-content\") pod \"certified-operators-847fc\" (UID: \"004f0ec8-0932-44a0-aa74-985e60adebef\") " pod="openshift-marketplace/certified-operators-847fc"
Nov 25 09:35:22 crc kubenswrapper[4687]: I1125 09:35:22.365368 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/004f0ec8-0932-44a0-aa74-985e60adebef-utilities\") pod \"certified-operators-847fc\" (UID: \"004f0ec8-0932-44a0-aa74-985e60adebef\") " pod="openshift-marketplace/certified-operators-847fc"
Nov 25 09:35:22 crc kubenswrapper[4687]: I1125 09:35:22.391819 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h845r\" (UniqueName: \"kubernetes.io/projected/004f0ec8-0932-44a0-aa74-985e60adebef-kube-api-access-h845r\") pod \"certified-operators-847fc\" (UID: \"004f0ec8-0932-44a0-aa74-985e60adebef\") " pod="openshift-marketplace/certified-operators-847fc"
Nov 25 09:35:22 crc kubenswrapper[4687]: I1125 09:35:22.527189 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-847fc"
Nov 25 09:35:23 crc kubenswrapper[4687]: I1125 09:35:23.025242 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-847fc"]
Nov 25 09:35:23 crc kubenswrapper[4687]: I1125 09:35:23.357750 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-847fc" event={"ID":"004f0ec8-0932-44a0-aa74-985e60adebef","Type":"ContainerStarted","Data":"86f3d3c0759d40283b68981d71f3a0606efe918d6f24695e85cf2101e8fa5cb9"}
Nov 25 09:35:23 crc kubenswrapper[4687]: I1125 09:35:23.357807 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-847fc" event={"ID":"004f0ec8-0932-44a0-aa74-985e60adebef","Type":"ContainerStarted","Data":"6b15368db8bb5cbf1e5ce8b8b5256490f0ac96c7de92547bde5c5418d0fbfed8"}
Nov 25 09:35:24 crc kubenswrapper[4687]: I1125 09:35:24.375308 4687 generic.go:334] "Generic (PLEG): container finished" podID="004f0ec8-0932-44a0-aa74-985e60adebef" containerID="86f3d3c0759d40283b68981d71f3a0606efe918d6f24695e85cf2101e8fa5cb9" exitCode=0
Nov 25 09:35:24 crc kubenswrapper[4687]: I1125 09:35:24.375374 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-847fc" event={"ID":"004f0ec8-0932-44a0-aa74-985e60adebef","Type":"ContainerDied","Data":"86f3d3c0759d40283b68981d71f3a0606efe918d6f24695e85cf2101e8fa5cb9"}
Nov 25 09:35:26 crc kubenswrapper[4687]: I1125 09:35:26.393358 4687 generic.go:334] "Generic (PLEG): container finished" podID="004f0ec8-0932-44a0-aa74-985e60adebef" containerID="cef7e42a7766979e20ee1f51b9da4e9a1960bbb4da23a7514a3929f0b21c529e" exitCode=0
Nov 25 09:35:26 crc kubenswrapper[4687]: I1125 09:35:26.393554 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-847fc" event={"ID":"004f0ec8-0932-44a0-aa74-985e60adebef","Type":"ContainerDied","Data":"cef7e42a7766979e20ee1f51b9da4e9a1960bbb4da23a7514a3929f0b21c529e"}
Nov 25 09:35:26 crc kubenswrapper[4687]: I1125 09:35:26.734387 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895"
Nov 25 09:35:26 crc kubenswrapper[4687]: E1125 09:35:26.734959 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c"
Nov 25 09:35:27 crc kubenswrapper[4687]: I1125 09:35:27.405603 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-847fc" event={"ID":"004f0ec8-0932-44a0-aa74-985e60adebef","Type":"ContainerStarted","Data":"045d73f55b312b3f2796440125812836e3e2f183d8a1afcb502a228483b6043a"}
Nov 25 09:35:32 crc kubenswrapper[4687]: I1125 09:35:32.527605 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-847fc"
Nov 25 09:35:32 crc kubenswrapper[4687]: I1125 09:35:32.528200 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-847fc"
Nov 25 09:35:32 crc kubenswrapper[4687]: I1125 09:35:32.579644 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-847fc"
Nov 25 09:35:32 crc kubenswrapper[4687]: I1125 09:35:32.607046 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-847fc" podStartSLOduration=8.036854167 podStartE2EDuration="10.607024029s" podCreationTimestamp="2025-11-25 09:35:22 +0000 UTC" firstStartedPulling="2025-11-25 09:35:24.378128707 +0000 UTC m=+1919.431768425" lastFinishedPulling="2025-11-25 09:35:26.948298539 +0000 UTC m=+1922.001938287" observedRunningTime="2025-11-25 09:35:27.428748725 +0000 UTC m=+1922.482388443" watchObservedRunningTime="2025-11-25 09:35:32.607024029 +0000 UTC m=+1927.660663757"
Nov 25 09:35:33 crc kubenswrapper[4687]: I1125 09:35:33.496569 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-847fc"
Nov 25 09:35:33 crc kubenswrapper[4687]: I1125 09:35:33.553552 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-847fc"]
Nov 25 09:35:35 crc kubenswrapper[4687]: I1125 09:35:35.469848 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-847fc" podUID="004f0ec8-0932-44a0-aa74-985e60adebef" containerName="registry-server" containerID="cri-o://045d73f55b312b3f2796440125812836e3e2f183d8a1afcb502a228483b6043a" gracePeriod=2
Nov 25 09:35:35 crc kubenswrapper[4687]: I1125 09:35:35.935253 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-847fc"
Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.017454 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/004f0ec8-0932-44a0-aa74-985e60adebef-catalog-content\") pod \"004f0ec8-0932-44a0-aa74-985e60adebef\" (UID: \"004f0ec8-0932-44a0-aa74-985e60adebef\") "
Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.017599 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h845r\" (UniqueName: \"kubernetes.io/projected/004f0ec8-0932-44a0-aa74-985e60adebef-kube-api-access-h845r\") pod \"004f0ec8-0932-44a0-aa74-985e60adebef\" (UID: \"004f0ec8-0932-44a0-aa74-985e60adebef\") "
Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.017673 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/004f0ec8-0932-44a0-aa74-985e60adebef-utilities\") pod \"004f0ec8-0932-44a0-aa74-985e60adebef\" (UID: \"004f0ec8-0932-44a0-aa74-985e60adebef\") "
Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.019036 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/004f0ec8-0932-44a0-aa74-985e60adebef-utilities" (OuterVolumeSpecName: "utilities") pod "004f0ec8-0932-44a0-aa74-985e60adebef" (UID: "004f0ec8-0932-44a0-aa74-985e60adebef"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.038573 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/004f0ec8-0932-44a0-aa74-985e60adebef-kube-api-access-h845r" (OuterVolumeSpecName: "kube-api-access-h845r") pod "004f0ec8-0932-44a0-aa74-985e60adebef" (UID: "004f0ec8-0932-44a0-aa74-985e60adebef"). InnerVolumeSpecName "kube-api-access-h845r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.089990 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/004f0ec8-0932-44a0-aa74-985e60adebef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "004f0ec8-0932-44a0-aa74-985e60adebef" (UID: "004f0ec8-0932-44a0-aa74-985e60adebef"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.120247 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h845r\" (UniqueName: \"kubernetes.io/projected/004f0ec8-0932-44a0-aa74-985e60adebef-kube-api-access-h845r\") on node \"crc\" DevicePath \"\""
Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.120517 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/004f0ec8-0932-44a0-aa74-985e60adebef-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.120620 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/004f0ec8-0932-44a0-aa74-985e60adebef-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.479893 4687 generic.go:334] "Generic (PLEG): container finished" podID="004f0ec8-0932-44a0-aa74-985e60adebef" containerID="045d73f55b312b3f2796440125812836e3e2f183d8a1afcb502a228483b6043a" exitCode=0
Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.479949 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-847fc" Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.479966 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-847fc" event={"ID":"004f0ec8-0932-44a0-aa74-985e60adebef","Type":"ContainerDied","Data":"045d73f55b312b3f2796440125812836e3e2f183d8a1afcb502a228483b6043a"} Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.481120 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-847fc" event={"ID":"004f0ec8-0932-44a0-aa74-985e60adebef","Type":"ContainerDied","Data":"6b15368db8bb5cbf1e5ce8b8b5256490f0ac96c7de92547bde5c5418d0fbfed8"} Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.481160 4687 scope.go:117] "RemoveContainer" containerID="045d73f55b312b3f2796440125812836e3e2f183d8a1afcb502a228483b6043a" Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.505898 4687 scope.go:117] "RemoveContainer" containerID="cef7e42a7766979e20ee1f51b9da4e9a1960bbb4da23a7514a3929f0b21c529e" Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.516275 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-847fc"] Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.524655 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-847fc"] Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.542096 4687 scope.go:117] "RemoveContainer" containerID="86f3d3c0759d40283b68981d71f3a0606efe918d6f24695e85cf2101e8fa5cb9" Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.575712 4687 scope.go:117] "RemoveContainer" containerID="045d73f55b312b3f2796440125812836e3e2f183d8a1afcb502a228483b6043a" Nov 25 09:35:36 crc kubenswrapper[4687]: E1125 09:35:36.579656 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"045d73f55b312b3f2796440125812836e3e2f183d8a1afcb502a228483b6043a\": container with ID starting with 045d73f55b312b3f2796440125812836e3e2f183d8a1afcb502a228483b6043a not found: ID does not exist" containerID="045d73f55b312b3f2796440125812836e3e2f183d8a1afcb502a228483b6043a" Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.579716 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"045d73f55b312b3f2796440125812836e3e2f183d8a1afcb502a228483b6043a"} err="failed to get container status \"045d73f55b312b3f2796440125812836e3e2f183d8a1afcb502a228483b6043a\": rpc error: code = NotFound desc = could not find container \"045d73f55b312b3f2796440125812836e3e2f183d8a1afcb502a228483b6043a\": container with ID starting with 045d73f55b312b3f2796440125812836e3e2f183d8a1afcb502a228483b6043a not found: ID does not exist" Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.579752 4687 scope.go:117] "RemoveContainer" containerID="cef7e42a7766979e20ee1f51b9da4e9a1960bbb4da23a7514a3929f0b21c529e" Nov 25 09:35:36 crc kubenswrapper[4687]: E1125 09:35:36.580396 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cef7e42a7766979e20ee1f51b9da4e9a1960bbb4da23a7514a3929f0b21c529e\": container with ID starting with cef7e42a7766979e20ee1f51b9da4e9a1960bbb4da23a7514a3929f0b21c529e not found: ID does not exist" containerID="cef7e42a7766979e20ee1f51b9da4e9a1960bbb4da23a7514a3929f0b21c529e" Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.580444 4687 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cef7e42a7766979e20ee1f51b9da4e9a1960bbb4da23a7514a3929f0b21c529e"} err="failed to get container status \"cef7e42a7766979e20ee1f51b9da4e9a1960bbb4da23a7514a3929f0b21c529e\": rpc error: code = NotFound desc = could not find container \"cef7e42a7766979e20ee1f51b9da4e9a1960bbb4da23a7514a3929f0b21c529e\": container with ID starting with cef7e42a7766979e20ee1f51b9da4e9a1960bbb4da23a7514a3929f0b21c529e not found: ID does not exist" Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.580484 4687 scope.go:117] "RemoveContainer" containerID="86f3d3c0759d40283b68981d71f3a0606efe918d6f24695e85cf2101e8fa5cb9" Nov 25 09:35:36 crc kubenswrapper[4687]: E1125 09:35:36.580930 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86f3d3c0759d40283b68981d71f3a0606efe918d6f24695e85cf2101e8fa5cb9\": container with ID starting with 86f3d3c0759d40283b68981d71f3a0606efe918d6f24695e85cf2101e8fa5cb9 not found: ID does not exist" containerID="86f3d3c0759d40283b68981d71f3a0606efe918d6f24695e85cf2101e8fa5cb9" Nov 25 09:35:36 crc kubenswrapper[4687]: I1125 09:35:36.581028 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86f3d3c0759d40283b68981d71f3a0606efe918d6f24695e85cf2101e8fa5cb9"} err="failed to get container status \"86f3d3c0759d40283b68981d71f3a0606efe918d6f24695e85cf2101e8fa5cb9\": rpc error: code = NotFound desc = could not find container \"86f3d3c0759d40283b68981d71f3a0606efe918d6f24695e85cf2101e8fa5cb9\": container with ID starting with 86f3d3c0759d40283b68981d71f3a0606efe918d6f24695e85cf2101e8fa5cb9 not found: ID does not exist" Nov 25 09:35:37 crc kubenswrapper[4687]: I1125 09:35:37.749043 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="004f0ec8-0932-44a0-aa74-985e60adebef" path="/var/lib/kubelet/pods/004f0ec8-0932-44a0-aa74-985e60adebef/volumes" Nov 25 09:35:37 crc kubenswrapper[4687]: I1125 09:35:37.768453 4687 scope.go:117] "RemoveContainer" containerID="4de9d94df83cd6ce2747ad3482cd75aad445a50c7b3090b8554d29f5995714e9" Nov 25 09:35:37 crc kubenswrapper[4687]: I1125 09:35:37.827223 4687 scope.go:117] "RemoveContainer" containerID="36dcaced308de8a14b8e0fa94c3bc5987d0b6dea9eea2dd787cc9622583dd1d4" Nov 25 09:35:37 crc kubenswrapper[4687]: I1125 09:35:37.879554 4687 scope.go:117] "RemoveContainer" containerID="9072d01244da2515ab0e242e0a9776bf583d4df342452cd2c72d2a5c77ed4fff" Nov 25 09:35:38 crc kubenswrapper[4687]: I1125 09:35:38.734674 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895" Nov 25 09:35:38 crc kubenswrapper[4687]: E1125 09:35:38.735426 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:35:49 crc kubenswrapper[4687]: I1125 09:35:49.587219 4687 generic.go:334] "Generic (PLEG): container finished" podID="d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc" containerID="e5537125eb78c9741a2ec49a77dd6623c1b7fe0e08c63b7cb5571bb67131daf4" exitCode=0 Nov 25 09:35:49 crc kubenswrapper[4687]: I1125 09:35:49.587357 4687 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr" event={"ID":"d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc","Type":"ContainerDied","Data":"e5537125eb78c9741a2ec49a77dd6623c1b7fe0e08c63b7cb5571bb67131daf4"} Nov 25 09:35:49 crc kubenswrapper[4687]: I1125 09:35:49.735104 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895" Nov 25 09:35:49 crc kubenswrapper[4687]: E1125 09:35:49.735378 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.005879 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.090468 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc-ssh-key\") pod \"d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc\" (UID: \"d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc\") " Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.090528 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc-inventory\") pod \"d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc\" (UID: \"d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc\") " Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.090630 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s5xnv\" (UniqueName: \"kubernetes.io/projected/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc-kube-api-access-s5xnv\") pod \"d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc\" (UID: \"d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc\") " Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.097698 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc-kube-api-access-s5xnv" (OuterVolumeSpecName: "kube-api-access-s5xnv") pod "d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc" (UID: "d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc"). InnerVolumeSpecName "kube-api-access-s5xnv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:35:51 crc kubenswrapper[4687]: E1125 09:35:51.112762 4687 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc-inventory podName:d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc nodeName:}" failed. No retries permitted until 2025-11-25 09:35:51.612727104 +0000 UTC m=+1946.666366842 (durationBeforeRetry 500ms). 
Error: error cleaning subPath mounts for volume "inventory" (UniqueName: "kubernetes.io/secret/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc-inventory") pod "d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc" (UID: "d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc") : error deleting /var/lib/kubelet/pods/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc/volume-subpaths: remove /var/lib/kubelet/pods/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc/volume-subpaths: no such file or directory Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.115530 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc" (UID: "d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.193123 4687 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.193164 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s5xnv\" (UniqueName: \"kubernetes.io/projected/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc-kube-api-access-s5xnv\") on node \"crc\" DevicePath \"\"" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.606714 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr" event={"ID":"d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc","Type":"ContainerDied","Data":"c86251a95e98dd0aebb226e8926ad61b6a51920f3078bd75ab38188f7c54a995"} Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.606746 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.606760 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c86251a95e98dd0aebb226e8926ad61b6a51920f3078bd75ab38188f7c54a995" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.689710 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-wnpct"] Nov 25 09:35:51 crc kubenswrapper[4687]: E1125 09:35:51.690091 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.690109 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 09:35:51 crc kubenswrapper[4687]: E1125 09:35:51.690122 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="004f0ec8-0932-44a0-aa74-985e60adebef" containerName="extract-content" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.690129 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="004f0ec8-0932-44a0-aa74-985e60adebef" containerName="extract-content" Nov 25 09:35:51 crc kubenswrapper[4687]: E1125 09:35:51.690151 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="004f0ec8-0932-44a0-aa74-985e60adebef" containerName="extract-utilities" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.690158 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="004f0ec8-0932-44a0-aa74-985e60adebef" containerName="extract-utilities" Nov 25 09:35:51 crc kubenswrapper[4687]: E1125 09:35:51.690186 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="004f0ec8-0932-44a0-aa74-985e60adebef" containerName="registry-server" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.690194 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="004f0ec8-0932-44a0-aa74-985e60adebef" containerName="registry-server" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.690358 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.690376 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="004f0ec8-0932-44a0-aa74-985e60adebef" containerName="registry-server" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.690960 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-wnpct" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.701493 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc-inventory\") pod \"d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc\" (UID: \"d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc\") " Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.704193 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-wnpct"] Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.708531 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc-inventory" (OuterVolumeSpecName: "inventory") pod "d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc" (UID: "d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.803567 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6d2d4\" (UniqueName: \"kubernetes.io/projected/ba70f059-0179-41d0-b0fe-2f0e0b4db2a7-kube-api-access-6d2d4\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-wnpct\" (UID: \"ba70f059-0179-41d0-b0fe-2f0e0b4db2a7\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-wnpct" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.803653 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba70f059-0179-41d0-b0fe-2f0e0b4db2a7-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-wnpct\" (UID: \"ba70f059-0179-41d0-b0fe-2f0e0b4db2a7\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-wnpct" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.803881 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ba70f059-0179-41d0-b0fe-2f0e0b4db2a7-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-wnpct\" (UID: \"ba70f059-0179-41d0-b0fe-2f0e0b4db2a7\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-wnpct" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.804303 4687 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.905474 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6d2d4\" (UniqueName: \"kubernetes.io/projected/ba70f059-0179-41d0-b0fe-2f0e0b4db2a7-kube-api-access-6d2d4\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-wnpct\" (UID: \"ba70f059-0179-41d0-b0fe-2f0e0b4db2a7\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-wnpct" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.905561 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba70f059-0179-41d0-b0fe-2f0e0b4db2a7-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-wnpct\" (UID: \"ba70f059-0179-41d0-b0fe-2f0e0b4db2a7\") " 
pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-wnpct" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.905610 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ba70f059-0179-41d0-b0fe-2f0e0b4db2a7-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-wnpct\" (UID: \"ba70f059-0179-41d0-b0fe-2f0e0b4db2a7\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-wnpct" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.909854 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ba70f059-0179-41d0-b0fe-2f0e0b4db2a7-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-wnpct\" (UID: \"ba70f059-0179-41d0-b0fe-2f0e0b4db2a7\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-wnpct" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.910080 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba70f059-0179-41d0-b0fe-2f0e0b4db2a7-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-wnpct\" (UID: \"ba70f059-0179-41d0-b0fe-2f0e0b4db2a7\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-wnpct" Nov 25 09:35:51 crc kubenswrapper[4687]: I1125 09:35:51.926100 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6d2d4\" (UniqueName: \"kubernetes.io/projected/ba70f059-0179-41d0-b0fe-2f0e0b4db2a7-kube-api-access-6d2d4\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-wnpct\" (UID: \"ba70f059-0179-41d0-b0fe-2f0e0b4db2a7\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-wnpct" Nov 25 09:35:52 crc kubenswrapper[4687]: I1125 09:35:52.068338 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-wnpct" Nov 25 09:35:52 crc kubenswrapper[4687]: I1125 09:35:52.615835 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-wnpct"] Nov 25 09:35:52 crc kubenswrapper[4687]: W1125 09:35:52.624281 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podba70f059_0179_41d0_b0fe_2f0e0b4db2a7.slice/crio-d613d8f544b6b3efb0447a4d48e523727912d8829a1c5a4f7e774f9dcaef0d88 WatchSource:0}: Error finding container d613d8f544b6b3efb0447a4d48e523727912d8829a1c5a4f7e774f9dcaef0d88: Status 404 returned error can't find the container with id d613d8f544b6b3efb0447a4d48e523727912d8829a1c5a4f7e774f9dcaef0d88 Nov 25 09:35:53 crc kubenswrapper[4687]: I1125 09:35:53.655311 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-wnpct" event={"ID":"ba70f059-0179-41d0-b0fe-2f0e0b4db2a7","Type":"ContainerStarted","Data":"b49d3d9d7f87dfb6b428bc1792d47e844c7a17ce0b698f3527a9c4bf0b7e0d3c"} Nov 25 09:35:53 crc kubenswrapper[4687]: I1125 09:35:53.655971 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-wnpct" event={"ID":"ba70f059-0179-41d0-b0fe-2f0e0b4db2a7","Type":"ContainerStarted","Data":"d613d8f544b6b3efb0447a4d48e523727912d8829a1c5a4f7e774f9dcaef0d88"} Nov 25 09:35:53 crc kubenswrapper[4687]: I1125 09:35:53.682333 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-wnpct" podStartSLOduration=2.103813156 podStartE2EDuration="2.68231855s" podCreationTimestamp="2025-11-25 09:35:51 +0000 UTC" firstStartedPulling="2025-11-25 09:35:52.626591807 +0000 UTC m=+1947.680231525" lastFinishedPulling="2025-11-25 09:35:53.205097201 +0000 UTC m=+1948.258736919" observedRunningTime="2025-11-25 09:35:53.680279555 +0000 UTC m=+1948.733919273" watchObservedRunningTime="2025-11-25 09:35:53.68231855 +0000 UTC m=+1948.735958258" Nov 25 09:35:58 crc kubenswrapper[4687]: I1125 09:35:58.042304 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-bsrx2"] Nov 25 09:35:58 crc kubenswrapper[4687]: I1125 09:35:58.050493 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-bsrx2"] Nov 25 09:35:58 crc kubenswrapper[4687]: I1125 09:35:58.695318 4687 generic.go:334] "Generic (PLEG): container finished" podID="ba70f059-0179-41d0-b0fe-2f0e0b4db2a7" containerID="b49d3d9d7f87dfb6b428bc1792d47e844c7a17ce0b698f3527a9c4bf0b7e0d3c" exitCode=0 Nov 25 09:35:58 crc kubenswrapper[4687]: I1125 09:35:58.695435 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-wnpct" event={"ID":"ba70f059-0179-41d0-b0fe-2f0e0b4db2a7","Type":"ContainerDied","Data":"b49d3d9d7f87dfb6b428bc1792d47e844c7a17ce0b698f3527a9c4bf0b7e0d3c"} Nov 25 09:35:59 crc kubenswrapper[4687]: I1125 09:35:59.746384 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f5e992c-a5a9-437d-9f18-89684260190c" path="/var/lib/kubelet/pods/5f5e992c-a5a9-437d-9f18-89684260190c/volumes" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.132091 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-wnpct" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.276958 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6d2d4\" (UniqueName: \"kubernetes.io/projected/ba70f059-0179-41d0-b0fe-2f0e0b4db2a7-kube-api-access-6d2d4\") pod \"ba70f059-0179-41d0-b0fe-2f0e0b4db2a7\" (UID: \"ba70f059-0179-41d0-b0fe-2f0e0b4db2a7\") " Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.277100 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba70f059-0179-41d0-b0fe-2f0e0b4db2a7-inventory\") pod \"ba70f059-0179-41d0-b0fe-2f0e0b4db2a7\" (UID: \"ba70f059-0179-41d0-b0fe-2f0e0b4db2a7\") " Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.277222 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ba70f059-0179-41d0-b0fe-2f0e0b4db2a7-ssh-key\") pod \"ba70f059-0179-41d0-b0fe-2f0e0b4db2a7\" (UID: \"ba70f059-0179-41d0-b0fe-2f0e0b4db2a7\") " Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.289400 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba70f059-0179-41d0-b0fe-2f0e0b4db2a7-kube-api-access-6d2d4" (OuterVolumeSpecName: "kube-api-access-6d2d4") pod "ba70f059-0179-41d0-b0fe-2f0e0b4db2a7" (UID: "ba70f059-0179-41d0-b0fe-2f0e0b4db2a7"). InnerVolumeSpecName "kube-api-access-6d2d4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.304119 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba70f059-0179-41d0-b0fe-2f0e0b4db2a7-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ba70f059-0179-41d0-b0fe-2f0e0b4db2a7" (UID: "ba70f059-0179-41d0-b0fe-2f0e0b4db2a7"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.305220 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba70f059-0179-41d0-b0fe-2f0e0b4db2a7-inventory" (OuterVolumeSpecName: "inventory") pod "ba70f059-0179-41d0-b0fe-2f0e0b4db2a7" (UID: "ba70f059-0179-41d0-b0fe-2f0e0b4db2a7"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.380152 4687 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ba70f059-0179-41d0-b0fe-2f0e0b4db2a7-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.380186 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6d2d4\" (UniqueName: \"kubernetes.io/projected/ba70f059-0179-41d0-b0fe-2f0e0b4db2a7-kube-api-access-6d2d4\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.380200 4687 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba70f059-0179-41d0-b0fe-2f0e0b4db2a7-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.712940 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-wnpct" event={"ID":"ba70f059-0179-41d0-b0fe-2f0e0b4db2a7","Type":"ContainerDied","Data":"d613d8f544b6b3efb0447a4d48e523727912d8829a1c5a4f7e774f9dcaef0d88"} Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.712992 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d613d8f544b6b3efb0447a4d48e523727912d8829a1c5a4f7e774f9dcaef0d88" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.713030 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-wnpct" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.797609 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-jh85t"] Nov 25 09:36:00 crc kubenswrapper[4687]: E1125 09:36:00.798061 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba70f059-0179-41d0-b0fe-2f0e0b4db2a7" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.798075 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba70f059-0179-41d0-b0fe-2f0e0b4db2a7" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.798290 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba70f059-0179-41d0-b0fe-2f0e0b4db2a7" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.798986 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jh85t" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.802669 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.802809 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-44nct" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.802900 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.802918 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.806986 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-jh85t"] Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.887214 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29a959b9-db17-40b5-8c9b-f54bc3548ca2-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-jh85t\" (UID: \"29a959b9-db17-40b5-8c9b-f54bc3548ca2\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jh85t" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.887347 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29a959b9-db17-40b5-8c9b-f54bc3548ca2-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-jh85t\" (UID: \"29a959b9-db17-40b5-8c9b-f54bc3548ca2\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jh85t" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.887415 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cgkf\" (UniqueName: \"kubernetes.io/projected/29a959b9-db17-40b5-8c9b-f54bc3548ca2-kube-api-access-7cgkf\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-jh85t\" (UID: \"29a959b9-db17-40b5-8c9b-f54bc3548ca2\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jh85t" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.989010 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29a959b9-db17-40b5-8c9b-f54bc3548ca2-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-jh85t\" (UID: \"29a959b9-db17-40b5-8c9b-f54bc3548ca2\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jh85t" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.989341 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cgkf\" (UniqueName: \"kubernetes.io/projected/29a959b9-db17-40b5-8c9b-f54bc3548ca2-kube-api-access-7cgkf\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-jh85t\" (UID: \"29a959b9-db17-40b5-8c9b-f54bc3548ca2\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jh85t" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.989537 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29a959b9-db17-40b5-8c9b-f54bc3548ca2-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-jh85t\" (UID: 
\"29a959b9-db17-40b5-8c9b-f54bc3548ca2\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jh85t" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.993962 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29a959b9-db17-40b5-8c9b-f54bc3548ca2-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-jh85t\" (UID: \"29a959b9-db17-40b5-8c9b-f54bc3548ca2\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jh85t" Nov 25 09:36:00 crc kubenswrapper[4687]: I1125 09:36:00.999684 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29a959b9-db17-40b5-8c9b-f54bc3548ca2-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-jh85t\" (UID: \"29a959b9-db17-40b5-8c9b-f54bc3548ca2\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jh85t" Nov 25 09:36:01 crc kubenswrapper[4687]: I1125 09:36:01.006377 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cgkf\" (UniqueName: \"kubernetes.io/projected/29a959b9-db17-40b5-8c9b-f54bc3548ca2-kube-api-access-7cgkf\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-jh85t\" (UID: \"29a959b9-db17-40b5-8c9b-f54bc3548ca2\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jh85t" Nov 25 09:36:01 crc kubenswrapper[4687]: I1125 09:36:01.117124 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jh85t" Nov 25 09:36:01 crc kubenswrapper[4687]: I1125 09:36:01.668830 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-jh85t"] Nov 25 09:36:01 crc kubenswrapper[4687]: I1125 09:36:01.722774 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jh85t" event={"ID":"29a959b9-db17-40b5-8c9b-f54bc3548ca2","Type":"ContainerStarted","Data":"95cdc12ad605a95cc1ef1e778546ebcc7dbcf3cbef5c8bc481c515c77f42eda1"} Nov 25 09:36:03 crc kubenswrapper[4687]: I1125 09:36:03.735992 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895" Nov 25 09:36:03 crc kubenswrapper[4687]: E1125 09:36:03.737177 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:36:04 crc kubenswrapper[4687]: I1125 09:36:04.760444 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jh85t" event={"ID":"29a959b9-db17-40b5-8c9b-f54bc3548ca2","Type":"ContainerStarted","Data":"bb5c16c6d1d084cd9c1480e390399bc76d4e36a386d4e3f39546d79f41e47d94"} Nov 25 09:36:16 crc kubenswrapper[4687]: I1125 09:36:16.734457 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895" Nov 25 09:36:16 crc kubenswrapper[4687]: E1125 09:36:16.735337 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:36:29 crc kubenswrapper[4687]: I1125 09:36:29.735493 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895" Nov 25 09:36:30 crc kubenswrapper[4687]: I1125 09:36:30.982905 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerStarted","Data":"f232f6663409036e1916f49b44a9d348d4367ab37376d99fe2b76c8655d5ccae"} Nov 25 09:36:31 crc kubenswrapper[4687]: I1125 09:36:31.000637 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jh85t" podStartSLOduration=28.969218183 podStartE2EDuration="31.000617278s" podCreationTimestamp="2025-11-25 09:36:00 +0000 UTC" firstStartedPulling="2025-11-25 09:36:01.672484248 +0000 UTC m=+1956.726123966" lastFinishedPulling="2025-11-25 09:36:03.703883333 +0000 UTC m=+1958.757523061" observedRunningTime="2025-11-25 09:36:04.780277386 +0000 UTC m=+1959.833917104" watchObservedRunningTime="2025-11-25 09:36:31.000617278 +0000 UTC m=+1986.054257016" Nov 25 09:36:37 crc kubenswrapper[4687]: I1125 09:36:37.984377 4687 scope.go:117] "RemoveContainer" containerID="68b8ca17adfd03dd0df30f5ddfc10ae586ef55f2e271a15face759ef764d9fa0" Nov 25 09:36:43 crc kubenswrapper[4687]: I1125 09:36:43.091014 4687 generic.go:334] "Generic (PLEG): container finished" podID="29a959b9-db17-40b5-8c9b-f54bc3548ca2" containerID="bb5c16c6d1d084cd9c1480e390399bc76d4e36a386d4e3f39546d79f41e47d94" exitCode=0 Nov 25 09:36:43 crc kubenswrapper[4687]: I1125 09:36:43.091095 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jh85t" event={"ID":"29a959b9-db17-40b5-8c9b-f54bc3548ca2","Type":"ContainerDied","Data":"bb5c16c6d1d084cd9c1480e390399bc76d4e36a386d4e3f39546d79f41e47d94"} Nov 25 09:36:44 crc kubenswrapper[4687]: I1125 09:36:44.475662 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jh85t" Nov 25 09:36:44 crc kubenswrapper[4687]: I1125 09:36:44.536916 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7cgkf\" (UniqueName: \"kubernetes.io/projected/29a959b9-db17-40b5-8c9b-f54bc3548ca2-kube-api-access-7cgkf\") pod \"29a959b9-db17-40b5-8c9b-f54bc3548ca2\" (UID: \"29a959b9-db17-40b5-8c9b-f54bc3548ca2\") " Nov 25 09:36:44 crc kubenswrapper[4687]: I1125 09:36:44.537060 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29a959b9-db17-40b5-8c9b-f54bc3548ca2-ssh-key\") pod \"29a959b9-db17-40b5-8c9b-f54bc3548ca2\" (UID: \"29a959b9-db17-40b5-8c9b-f54bc3548ca2\") " Nov 25 09:36:44 crc kubenswrapper[4687]: I1125 09:36:44.537105 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29a959b9-db17-40b5-8c9b-f54bc3548ca2-inventory\") pod \"29a959b9-db17-40b5-8c9b-f54bc3548ca2\" (UID: \"29a959b9-db17-40b5-8c9b-f54bc3548ca2\") " Nov 25 09:36:44 crc kubenswrapper[4687]: I1125 09:36:44.558120 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29a959b9-db17-40b5-8c9b-f54bc3548ca2-kube-api-access-7cgkf" (OuterVolumeSpecName: "kube-api-access-7cgkf") pod "29a959b9-db17-40b5-8c9b-f54bc3548ca2" (UID: "29a959b9-db17-40b5-8c9b-f54bc3548ca2"). InnerVolumeSpecName "kube-api-access-7cgkf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:44 crc kubenswrapper[4687]: I1125 09:36:44.567808 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29a959b9-db17-40b5-8c9b-f54bc3548ca2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "29a959b9-db17-40b5-8c9b-f54bc3548ca2" (UID: "29a959b9-db17-40b5-8c9b-f54bc3548ca2"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:44 crc kubenswrapper[4687]: I1125 09:36:44.573791 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29a959b9-db17-40b5-8c9b-f54bc3548ca2-inventory" (OuterVolumeSpecName: "inventory") pod "29a959b9-db17-40b5-8c9b-f54bc3548ca2" (UID: "29a959b9-db17-40b5-8c9b-f54bc3548ca2"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:44 crc kubenswrapper[4687]: I1125 09:36:44.639426 4687 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29a959b9-db17-40b5-8c9b-f54bc3548ca2-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:44 crc kubenswrapper[4687]: I1125 09:36:44.639742 4687 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29a959b9-db17-40b5-8c9b-f54bc3548ca2-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:44 crc kubenswrapper[4687]: I1125 09:36:44.639759 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7cgkf\" (UniqueName: \"kubernetes.io/projected/29a959b9-db17-40b5-8c9b-f54bc3548ca2-kube-api-access-7cgkf\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.109002 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jh85t" event={"ID":"29a959b9-db17-40b5-8c9b-f54bc3548ca2","Type":"ContainerDied","Data":"95cdc12ad605a95cc1ef1e778546ebcc7dbcf3cbef5c8bc481c515c77f42eda1"} Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.109056 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jh85t" Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.109062 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="95cdc12ad605a95cc1ef1e778546ebcc7dbcf3cbef5c8bc481c515c77f42eda1" Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.197012 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr"] Nov 25 09:36:45 crc kubenswrapper[4687]: E1125 09:36:45.197374 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29a959b9-db17-40b5-8c9b-f54bc3548ca2" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.197390 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="29a959b9-db17-40b5-8c9b-f54bc3548ca2" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.197630 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="29a959b9-db17-40b5-8c9b-f54bc3548ca2" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.198291 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr" Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.203606 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.204173 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.204566 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-44nct" Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.205352 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.223268 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr"] Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.251625 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bx99z\" (UniqueName: \"kubernetes.io/projected/906f3fa0-c7e9-40dc-a876-0e0c9cdbc272-kube-api-access-bx99z\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr\" (UID: \"906f3fa0-c7e9-40dc-a876-0e0c9cdbc272\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr" Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.251932 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/906f3fa0-c7e9-40dc-a876-0e0c9cdbc272-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr\" (UID: \"906f3fa0-c7e9-40dc-a876-0e0c9cdbc272\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr" Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.252025 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/906f3fa0-c7e9-40dc-a876-0e0c9cdbc272-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr\" (UID: \"906f3fa0-c7e9-40dc-a876-0e0c9cdbc272\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr" Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.354176 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/906f3fa0-c7e9-40dc-a876-0e0c9cdbc272-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr\" (UID: \"906f3fa0-c7e9-40dc-a876-0e0c9cdbc272\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr" Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.354236 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/906f3fa0-c7e9-40dc-a876-0e0c9cdbc272-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr\" (UID: \"906f3fa0-c7e9-40dc-a876-0e0c9cdbc272\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr" Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.354329 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bx99z\" (UniqueName: \"kubernetes.io/projected/906f3fa0-c7e9-40dc-a876-0e0c9cdbc272-kube-api-access-bx99z\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr\" 
(UID: \"906f3fa0-c7e9-40dc-a876-0e0c9cdbc272\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr" Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.362650 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/906f3fa0-c7e9-40dc-a876-0e0c9cdbc272-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr\" (UID: \"906f3fa0-c7e9-40dc-a876-0e0c9cdbc272\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr" Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.368288 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/906f3fa0-c7e9-40dc-a876-0e0c9cdbc272-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr\" (UID: \"906f3fa0-c7e9-40dc-a876-0e0c9cdbc272\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr" Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.370805 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bx99z\" (UniqueName: \"kubernetes.io/projected/906f3fa0-c7e9-40dc-a876-0e0c9cdbc272-kube-api-access-bx99z\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr\" (UID: \"906f3fa0-c7e9-40dc-a876-0e0c9cdbc272\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr" Nov 25 09:36:45 crc kubenswrapper[4687]: I1125 09:36:45.530879 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr" Nov 25 09:36:46 crc kubenswrapper[4687]: I1125 09:36:46.058129 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr"] Nov 25 09:36:46 crc kubenswrapper[4687]: I1125 09:36:46.123609 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr" event={"ID":"906f3fa0-c7e9-40dc-a876-0e0c9cdbc272","Type":"ContainerStarted","Data":"8d7fb3a1a75faa238bac7eee5aabadf5d0eea9855fd1f2517f579d7e0510fae8"} Nov 25 09:36:48 crc kubenswrapper[4687]: I1125 09:36:48.142157 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr" event={"ID":"906f3fa0-c7e9-40dc-a876-0e0c9cdbc272","Type":"ContainerStarted","Data":"a86fd0be0688dd274b3c658aa3df611e69400280d01265350b26a556f03fd95a"} Nov 25 09:36:48 crc kubenswrapper[4687]: I1125 09:36:48.173779 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr" podStartSLOduration=2.351281247 podStartE2EDuration="3.173755536s" podCreationTimestamp="2025-11-25 09:36:45 +0000 UTC" firstStartedPulling="2025-11-25 09:36:46.07595018 +0000 UTC m=+2001.129589898" lastFinishedPulling="2025-11-25 09:36:46.898424469 +0000 UTC m=+2001.952064187" observedRunningTime="2025-11-25 09:36:48.157025712 +0000 UTC m=+2003.210665450" watchObservedRunningTime="2025-11-25 09:36:48.173755536 +0000 UTC m=+2003.227395254" Nov 25 09:36:58 crc kubenswrapper[4687]: I1125 09:36:58.979213 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qpxgl"] Nov 25 09:36:58 crc kubenswrapper[4687]: I1125 09:36:58.981718 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qpxgl" Nov 25 09:36:58 crc kubenswrapper[4687]: I1125 09:36:58.990080 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qpxgl"] Nov 25 09:36:59 crc kubenswrapper[4687]: I1125 09:36:59.139414 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/954c615c-25f5-48ff-96a0-95dd985e135c-utilities\") pod \"community-operators-qpxgl\" (UID: \"954c615c-25f5-48ff-96a0-95dd985e135c\") " pod="openshift-marketplace/community-operators-qpxgl" Nov 25 09:36:59 crc kubenswrapper[4687]: I1125 09:36:59.139482 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/954c615c-25f5-48ff-96a0-95dd985e135c-catalog-content\") pod \"community-operators-qpxgl\" (UID: \"954c615c-25f5-48ff-96a0-95dd985e135c\") " pod="openshift-marketplace/community-operators-qpxgl" Nov 25 09:36:59 crc kubenswrapper[4687]: I1125 09:36:59.139579 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7856q\" (UniqueName: \"kubernetes.io/projected/954c615c-25f5-48ff-96a0-95dd985e135c-kube-api-access-7856q\") pod \"community-operators-qpxgl\" (UID: \"954c615c-25f5-48ff-96a0-95dd985e135c\") " pod="openshift-marketplace/community-operators-qpxgl" Nov 25 09:36:59 crc kubenswrapper[4687]: I1125 09:36:59.245274 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/954c615c-25f5-48ff-96a0-95dd985e135c-utilities\") pod \"community-operators-qpxgl\" (UID: \"954c615c-25f5-48ff-96a0-95dd985e135c\") " pod="openshift-marketplace/community-operators-qpxgl" Nov 25 09:36:59 crc kubenswrapper[4687]: I1125 09:36:59.245353 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/954c615c-25f5-48ff-96a0-95dd985e135c-catalog-content\") pod \"community-operators-qpxgl\" (UID: \"954c615c-25f5-48ff-96a0-95dd985e135c\") " pod="openshift-marketplace/community-operators-qpxgl" Nov 25 09:36:59 crc kubenswrapper[4687]: I1125 09:36:59.245511 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7856q\" (UniqueName: \"kubernetes.io/projected/954c615c-25f5-48ff-96a0-95dd985e135c-kube-api-access-7856q\") pod \"community-operators-qpxgl\" (UID: \"954c615c-25f5-48ff-96a0-95dd985e135c\") " pod="openshift-marketplace/community-operators-qpxgl" Nov 25 09:36:59 crc kubenswrapper[4687]: I1125 09:36:59.246394 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/954c615c-25f5-48ff-96a0-95dd985e135c-utilities\") pod \"community-operators-qpxgl\" (UID: \"954c615c-25f5-48ff-96a0-95dd985e135c\") " pod="openshift-marketplace/community-operators-qpxgl" Nov 25 09:36:59 crc kubenswrapper[4687]: I1125 09:36:59.246724 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/954c615c-25f5-48ff-96a0-95dd985e135c-catalog-content\") pod \"community-operators-qpxgl\" (UID: \"954c615c-25f5-48ff-96a0-95dd985e135c\") " pod="openshift-marketplace/community-operators-qpxgl" Nov 25 09:36:59 crc kubenswrapper[4687]: I1125 09:36:59.275796 4687 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-7856q\" (UniqueName: \"kubernetes.io/projected/954c615c-25f5-48ff-96a0-95dd985e135c-kube-api-access-7856q\") pod \"community-operators-qpxgl\" (UID: \"954c615c-25f5-48ff-96a0-95dd985e135c\") " pod="openshift-marketplace/community-operators-qpxgl" Nov 25 09:36:59 crc kubenswrapper[4687]: I1125 09:36:59.306238 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qpxgl" Nov 25 09:36:59 crc kubenswrapper[4687]: I1125 09:36:59.869198 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qpxgl"] Nov 25 09:37:00 crc kubenswrapper[4687]: I1125 09:37:00.240149 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qpxgl" event={"ID":"954c615c-25f5-48ff-96a0-95dd985e135c","Type":"ContainerStarted","Data":"2d5dfa95ded3f6a2fb48b8e33cd987394168900471912dfcdf92925fd3f5eeac"} Nov 25 09:37:00 crc kubenswrapper[4687]: I1125 09:37:00.240197 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qpxgl" event={"ID":"954c615c-25f5-48ff-96a0-95dd985e135c","Type":"ContainerStarted","Data":"2f7295f31237c21ea556bd976a4fad3b797d4ad69ced085edb2dd90120333bd8"} Nov 25 09:37:01 crc kubenswrapper[4687]: I1125 09:37:01.252900 4687 generic.go:334] "Generic (PLEG): container finished" podID="954c615c-25f5-48ff-96a0-95dd985e135c" containerID="2d5dfa95ded3f6a2fb48b8e33cd987394168900471912dfcdf92925fd3f5eeac" exitCode=0 Nov 25 09:37:01 crc kubenswrapper[4687]: I1125 09:37:01.253021 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qpxgl" event={"ID":"954c615c-25f5-48ff-96a0-95dd985e135c","Type":"ContainerDied","Data":"2d5dfa95ded3f6a2fb48b8e33cd987394168900471912dfcdf92925fd3f5eeac"} Nov 25 09:37:01 crc kubenswrapper[4687]: I1125 09:37:01.369352 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xcdmw"] Nov 25 09:37:01 crc kubenswrapper[4687]: I1125 09:37:01.371950 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xcdmw" Nov 25 09:37:01 crc kubenswrapper[4687]: I1125 09:37:01.379467 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xcdmw"] Nov 25 09:37:01 crc kubenswrapper[4687]: I1125 09:37:01.488475 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34240012-1d2c-41d0-8e7c-70b4d4e260a8-catalog-content\") pod \"redhat-operators-xcdmw\" (UID: \"34240012-1d2c-41d0-8e7c-70b4d4e260a8\") " pod="openshift-marketplace/redhat-operators-xcdmw" Nov 25 09:37:01 crc kubenswrapper[4687]: I1125 09:37:01.488754 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34240012-1d2c-41d0-8e7c-70b4d4e260a8-utilities\") pod \"redhat-operators-xcdmw\" (UID: \"34240012-1d2c-41d0-8e7c-70b4d4e260a8\") " pod="openshift-marketplace/redhat-operators-xcdmw" Nov 25 09:37:01 crc kubenswrapper[4687]: I1125 09:37:01.488794 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcsxn\" (UniqueName: \"kubernetes.io/projected/34240012-1d2c-41d0-8e7c-70b4d4e260a8-kube-api-access-xcsxn\") pod \"redhat-operators-xcdmw\" (UID: \"34240012-1d2c-41d0-8e7c-70b4d4e260a8\") " pod="openshift-marketplace/redhat-operators-xcdmw" Nov 25 09:37:01 crc kubenswrapper[4687]: I1125 09:37:01.590195 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34240012-1d2c-41d0-8e7c-70b4d4e260a8-utilities\") pod \"redhat-operators-xcdmw\" (UID: \"34240012-1d2c-41d0-8e7c-70b4d4e260a8\") " pod="openshift-marketplace/redhat-operators-xcdmw" Nov 25 09:37:01 crc kubenswrapper[4687]: I1125 09:37:01.590476 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcsxn\" (UniqueName: \"kubernetes.io/projected/34240012-1d2c-41d0-8e7c-70b4d4e260a8-kube-api-access-xcsxn\") pod \"redhat-operators-xcdmw\" (UID: \"34240012-1d2c-41d0-8e7c-70b4d4e260a8\") " pod="openshift-marketplace/redhat-operators-xcdmw" Nov 25 09:37:01 crc kubenswrapper[4687]: I1125 09:37:01.590596 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34240012-1d2c-41d0-8e7c-70b4d4e260a8-catalog-content\") pod \"redhat-operators-xcdmw\" (UID: \"34240012-1d2c-41d0-8e7c-70b4d4e260a8\") " pod="openshift-marketplace/redhat-operators-xcdmw" Nov 25 09:37:01 crc kubenswrapper[4687]: I1125 09:37:01.590959 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34240012-1d2c-41d0-8e7c-70b4d4e260a8-utilities\") pod \"redhat-operators-xcdmw\" (UID: \"34240012-1d2c-41d0-8e7c-70b4d4e260a8\") " pod="openshift-marketplace/redhat-operators-xcdmw" Nov 25 09:37:01 crc kubenswrapper[4687]: I1125 09:37:01.591057 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34240012-1d2c-41d0-8e7c-70b4d4e260a8-catalog-content\") pod \"redhat-operators-xcdmw\" (UID: \"34240012-1d2c-41d0-8e7c-70b4d4e260a8\") " pod="openshift-marketplace/redhat-operators-xcdmw" Nov 25 09:37:01 crc kubenswrapper[4687]: I1125 09:37:01.611772 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-xcsxn\" (UniqueName: \"kubernetes.io/projected/34240012-1d2c-41d0-8e7c-70b4d4e260a8-kube-api-access-xcsxn\") pod \"redhat-operators-xcdmw\" (UID: \"34240012-1d2c-41d0-8e7c-70b4d4e260a8\") " pod="openshift-marketplace/redhat-operators-xcdmw" Nov 25 09:37:01 crc kubenswrapper[4687]: I1125 09:37:01.694754 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xcdmw" Nov 25 09:37:02 crc kubenswrapper[4687]: I1125 09:37:02.214020 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xcdmw"] Nov 25 09:37:02 crc kubenswrapper[4687]: I1125 09:37:02.267907 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xcdmw" event={"ID":"34240012-1d2c-41d0-8e7c-70b4d4e260a8","Type":"ContainerStarted","Data":"7fc929dba80dccbf272cc695d0f42fa617deb73b80da14e3834480dff63d96bc"} Nov 25 09:37:05 crc kubenswrapper[4687]: I1125 09:37:05.301468 4687 generic.go:334] "Generic (PLEG): container finished" podID="34240012-1d2c-41d0-8e7c-70b4d4e260a8" containerID="7fa026561c2336f47884d1f865595f6bd3d7e55ad82be2dc76748ba87ab32526" exitCode=0 Nov 25 09:37:05 crc kubenswrapper[4687]: I1125 09:37:05.301595 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xcdmw" event={"ID":"34240012-1d2c-41d0-8e7c-70b4d4e260a8","Type":"ContainerDied","Data":"7fa026561c2336f47884d1f865595f6bd3d7e55ad82be2dc76748ba87ab32526"} Nov 25 09:37:05 crc kubenswrapper[4687]: I1125 09:37:05.305791 4687 generic.go:334] "Generic (PLEG): container finished" podID="954c615c-25f5-48ff-96a0-95dd985e135c" containerID="c53f50260349379c97a64beb2d161ad1c8c14d70e4a309fd2cc47d2b49bbd8b5" exitCode=0 Nov 25 09:37:05 crc kubenswrapper[4687]: I1125 09:37:05.305836 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qpxgl" event={"ID":"954c615c-25f5-48ff-96a0-95dd985e135c","Type":"ContainerDied","Data":"c53f50260349379c97a64beb2d161ad1c8c14d70e4a309fd2cc47d2b49bbd8b5"} Nov 25 09:37:07 crc kubenswrapper[4687]: I1125 09:37:07.330380 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qpxgl" event={"ID":"954c615c-25f5-48ff-96a0-95dd985e135c","Type":"ContainerStarted","Data":"a97ad439dc0caca840e0d6562782fdd7e2315627d89e8f2fb0c1cb09c01efcae"} Nov 25 09:37:07 crc kubenswrapper[4687]: I1125 09:37:07.370321 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qpxgl" podStartSLOduration=4.378758321 podStartE2EDuration="9.37030042s" podCreationTimestamp="2025-11-25 09:36:58 +0000 UTC" firstStartedPulling="2025-11-25 09:37:01.25504307 +0000 UTC m=+2016.308682788" lastFinishedPulling="2025-11-25 09:37:06.246585149 +0000 UTC m=+2021.300224887" observedRunningTime="2025-11-25 09:37:07.370197557 +0000 UTC m=+2022.423837305" watchObservedRunningTime="2025-11-25 09:37:07.37030042 +0000 UTC m=+2022.423940138" Nov 25 09:37:09 crc kubenswrapper[4687]: I1125 09:37:09.307983 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qpxgl" Nov 25 09:37:09 crc kubenswrapper[4687]: I1125 09:37:09.308921 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qpxgl" Nov 25 09:37:09 crc kubenswrapper[4687]: I1125 09:37:09.354776 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-marketplace/community-operators-qpxgl" Nov 25 09:37:09 crc kubenswrapper[4687]: I1125 09:37:09.358927 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xcdmw" event={"ID":"34240012-1d2c-41d0-8e7c-70b4d4e260a8","Type":"ContainerStarted","Data":"211b0ae84f399f28b167bd07b06471dc138d2a87ab00a626fe43a046a1fd4131"} Nov 25 09:37:10 crc kubenswrapper[4687]: I1125 09:37:10.371759 4687 generic.go:334] "Generic (PLEG): container finished" podID="34240012-1d2c-41d0-8e7c-70b4d4e260a8" containerID="211b0ae84f399f28b167bd07b06471dc138d2a87ab00a626fe43a046a1fd4131" exitCode=0 Nov 25 09:37:10 crc kubenswrapper[4687]: I1125 09:37:10.371842 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xcdmw" event={"ID":"34240012-1d2c-41d0-8e7c-70b4d4e260a8","Type":"ContainerDied","Data":"211b0ae84f399f28b167bd07b06471dc138d2a87ab00a626fe43a046a1fd4131"} Nov 25 09:37:19 crc kubenswrapper[4687]: I1125 09:37:19.372003 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qpxgl" Nov 25 09:37:19 crc kubenswrapper[4687]: I1125 09:37:19.423024 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qpxgl"] Nov 25 09:37:19 crc kubenswrapper[4687]: I1125 09:37:19.448248 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qpxgl" podUID="954c615c-25f5-48ff-96a0-95dd985e135c" containerName="registry-server" containerID="cri-o://a97ad439dc0caca840e0d6562782fdd7e2315627d89e8f2fb0c1cb09c01efcae" gracePeriod=2 Nov 25 09:37:22 crc kubenswrapper[4687]: I1125 09:37:22.476545 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qpxgl_954c615c-25f5-48ff-96a0-95dd985e135c/registry-server/0.log" Nov 25 09:37:22 crc kubenswrapper[4687]: I1125 09:37:22.478022 4687 generic.go:334] "Generic (PLEG): container finished" podID="954c615c-25f5-48ff-96a0-95dd985e135c" containerID="a97ad439dc0caca840e0d6562782fdd7e2315627d89e8f2fb0c1cb09c01efcae" exitCode=137 Nov 25 09:37:22 crc kubenswrapper[4687]: I1125 09:37:22.478082 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qpxgl" event={"ID":"954c615c-25f5-48ff-96a0-95dd985e135c","Type":"ContainerDied","Data":"a97ad439dc0caca840e0d6562782fdd7e2315627d89e8f2fb0c1cb09c01efcae"} Nov 25 09:37:22 crc kubenswrapper[4687]: I1125 09:37:22.963064 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qpxgl_954c615c-25f5-48ff-96a0-95dd985e135c/registry-server/0.log" Nov 25 09:37:22 crc kubenswrapper[4687]: I1125 09:37:22.967119 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qpxgl" Nov 25 09:37:23 crc kubenswrapper[4687]: I1125 09:37:23.155368 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7856q\" (UniqueName: \"kubernetes.io/projected/954c615c-25f5-48ff-96a0-95dd985e135c-kube-api-access-7856q\") pod \"954c615c-25f5-48ff-96a0-95dd985e135c\" (UID: \"954c615c-25f5-48ff-96a0-95dd985e135c\") " Nov 25 09:37:23 crc kubenswrapper[4687]: I1125 09:37:23.155713 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/954c615c-25f5-48ff-96a0-95dd985e135c-catalog-content\") pod \"954c615c-25f5-48ff-96a0-95dd985e135c\" (UID: \"954c615c-25f5-48ff-96a0-95dd985e135c\") " Nov 25 09:37:23 crc kubenswrapper[4687]: I1125 09:37:23.155766 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/954c615c-25f5-48ff-96a0-95dd985e135c-utilities\") pod \"954c615c-25f5-48ff-96a0-95dd985e135c\" (UID: \"954c615c-25f5-48ff-96a0-95dd985e135c\") " Nov 25 09:37:23 crc kubenswrapper[4687]: I1125 09:37:23.156438 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/954c615c-25f5-48ff-96a0-95dd985e135c-utilities" (OuterVolumeSpecName: "utilities") pod "954c615c-25f5-48ff-96a0-95dd985e135c" (UID: "954c615c-25f5-48ff-96a0-95dd985e135c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:37:23 crc kubenswrapper[4687]: I1125 09:37:23.156689 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/954c615c-25f5-48ff-96a0-95dd985e135c-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:37:23 crc kubenswrapper[4687]: I1125 09:37:23.162765 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/954c615c-25f5-48ff-96a0-95dd985e135c-kube-api-access-7856q" (OuterVolumeSpecName: "kube-api-access-7856q") pod "954c615c-25f5-48ff-96a0-95dd985e135c" (UID: "954c615c-25f5-48ff-96a0-95dd985e135c"). InnerVolumeSpecName "kube-api-access-7856q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:37:23 crc kubenswrapper[4687]: I1125 09:37:23.212197 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/954c615c-25f5-48ff-96a0-95dd985e135c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "954c615c-25f5-48ff-96a0-95dd985e135c" (UID: "954c615c-25f5-48ff-96a0-95dd985e135c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:37:23 crc kubenswrapper[4687]: I1125 09:37:23.258524 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/954c615c-25f5-48ff-96a0-95dd985e135c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:37:23 crc kubenswrapper[4687]: I1125 09:37:23.258563 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7856q\" (UniqueName: \"kubernetes.io/projected/954c615c-25f5-48ff-96a0-95dd985e135c-kube-api-access-7856q\") on node \"crc\" DevicePath \"\"" Nov 25 09:37:23 crc kubenswrapper[4687]: I1125 09:37:23.491160 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xcdmw" event={"ID":"34240012-1d2c-41d0-8e7c-70b4d4e260a8","Type":"ContainerStarted","Data":"c3aa35027166df439bf3057cff3bf25cf69b2bae36e4532674c015b23d234805"} Nov 25 09:37:23 crc kubenswrapper[4687]: I1125 09:37:23.493134 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qpxgl_954c615c-25f5-48ff-96a0-95dd985e135c/registry-server/0.log" Nov 25 09:37:23 crc kubenswrapper[4687]: I1125 09:37:23.494130 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qpxgl" event={"ID":"954c615c-25f5-48ff-96a0-95dd985e135c","Type":"ContainerDied","Data":"2f7295f31237c21ea556bd976a4fad3b797d4ad69ced085edb2dd90120333bd8"} Nov 25 09:37:23 crc kubenswrapper[4687]: I1125 09:37:23.494186 4687 scope.go:117] "RemoveContainer" containerID="a97ad439dc0caca840e0d6562782fdd7e2315627d89e8f2fb0c1cb09c01efcae" Nov 25 09:37:23 crc kubenswrapper[4687]: I1125 09:37:23.494325 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qpxgl" Nov 25 09:37:23 crc kubenswrapper[4687]: I1125 09:37:23.517315 4687 scope.go:117] "RemoveContainer" containerID="c53f50260349379c97a64beb2d161ad1c8c14d70e4a309fd2cc47d2b49bbd8b5" Nov 25 09:37:23 crc kubenswrapper[4687]: I1125 09:37:23.531255 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xcdmw" podStartSLOduration=5.481626008 podStartE2EDuration="22.531227439s" podCreationTimestamp="2025-11-25 09:37:01 +0000 UTC" firstStartedPulling="2025-11-25 09:37:05.303880799 +0000 UTC m=+2020.357520517" lastFinishedPulling="2025-11-25 09:37:22.35348223 +0000 UTC m=+2037.407121948" observedRunningTime="2025-11-25 09:37:23.516459718 +0000 UTC m=+2038.570099456" watchObservedRunningTime="2025-11-25 09:37:23.531227439 +0000 UTC m=+2038.584867157" Nov 25 09:37:23 crc kubenswrapper[4687]: I1125 09:37:23.560015 4687 scope.go:117] "RemoveContainer" containerID="2d5dfa95ded3f6a2fb48b8e33cd987394168900471912dfcdf92925fd3f5eeac" Nov 25 09:37:23 crc kubenswrapper[4687]: I1125 09:37:23.576201 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qpxgl"] Nov 25 09:37:23 crc kubenswrapper[4687]: I1125 09:37:23.584598 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qpxgl"] Nov 25 09:37:23 crc kubenswrapper[4687]: I1125 09:37:23.745687 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="954c615c-25f5-48ff-96a0-95dd985e135c" path="/var/lib/kubelet/pods/954c615c-25f5-48ff-96a0-95dd985e135c/volumes" Nov 25 09:37:31 crc kubenswrapper[4687]: I1125 09:37:31.694956 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xcdmw" Nov 25 09:37:31 crc kubenswrapper[4687]: I1125 09:37:31.696787 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xcdmw" Nov 25 09:37:31 crc kubenswrapper[4687]: I1125 09:37:31.746568 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xcdmw" Nov 25 09:37:32 crc kubenswrapper[4687]: I1125 09:37:32.648493 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xcdmw" Nov 25 09:37:32 crc kubenswrapper[4687]: I1125 09:37:32.716169 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xcdmw"] Nov 25 09:37:34 crc kubenswrapper[4687]: I1125 09:37:34.600360 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xcdmw" podUID="34240012-1d2c-41d0-8e7c-70b4d4e260a8" containerName="registry-server" containerID="cri-o://c3aa35027166df439bf3057cff3bf25cf69b2bae36e4532674c015b23d234805" gracePeriod=2 Nov 25 09:37:35 crc kubenswrapper[4687]: I1125 09:37:35.610602 4687 generic.go:334] "Generic (PLEG): container finished" podID="34240012-1d2c-41d0-8e7c-70b4d4e260a8" containerID="c3aa35027166df439bf3057cff3bf25cf69b2bae36e4532674c015b23d234805" exitCode=0 Nov 25 09:37:35 crc kubenswrapper[4687]: I1125 09:37:35.610741 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xcdmw" event={"ID":"34240012-1d2c-41d0-8e7c-70b4d4e260a8","Type":"ContainerDied","Data":"c3aa35027166df439bf3057cff3bf25cf69b2bae36e4532674c015b23d234805"} Nov 25 09:37:35 crc 
kubenswrapper[4687]: I1125 09:37:35.610938 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xcdmw" event={"ID":"34240012-1d2c-41d0-8e7c-70b4d4e260a8","Type":"ContainerDied","Data":"7fc929dba80dccbf272cc695d0f42fa617deb73b80da14e3834480dff63d96bc"} Nov 25 09:37:35 crc kubenswrapper[4687]: I1125 09:37:35.610958 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7fc929dba80dccbf272cc695d0f42fa617deb73b80da14e3834480dff63d96bc" Nov 25 09:37:35 crc kubenswrapper[4687]: I1125 09:37:35.647444 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xcdmw" Nov 25 09:37:35 crc kubenswrapper[4687]: I1125 09:37:35.829772 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34240012-1d2c-41d0-8e7c-70b4d4e260a8-catalog-content\") pod \"34240012-1d2c-41d0-8e7c-70b4d4e260a8\" (UID: \"34240012-1d2c-41d0-8e7c-70b4d4e260a8\") " Nov 25 09:37:35 crc kubenswrapper[4687]: I1125 09:37:35.829818 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcsxn\" (UniqueName: \"kubernetes.io/projected/34240012-1d2c-41d0-8e7c-70b4d4e260a8-kube-api-access-xcsxn\") pod \"34240012-1d2c-41d0-8e7c-70b4d4e260a8\" (UID: \"34240012-1d2c-41d0-8e7c-70b4d4e260a8\") " Nov 25 09:37:35 crc kubenswrapper[4687]: I1125 09:37:35.829881 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34240012-1d2c-41d0-8e7c-70b4d4e260a8-utilities\") pod \"34240012-1d2c-41d0-8e7c-70b4d4e260a8\" (UID: \"34240012-1d2c-41d0-8e7c-70b4d4e260a8\") " Nov 25 09:37:35 crc kubenswrapper[4687]: I1125 09:37:35.831015 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34240012-1d2c-41d0-8e7c-70b4d4e260a8-utilities" (OuterVolumeSpecName: "utilities") pod "34240012-1d2c-41d0-8e7c-70b4d4e260a8" (UID: "34240012-1d2c-41d0-8e7c-70b4d4e260a8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:37:35 crc kubenswrapper[4687]: I1125 09:37:35.835440 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34240012-1d2c-41d0-8e7c-70b4d4e260a8-kube-api-access-xcsxn" (OuterVolumeSpecName: "kube-api-access-xcsxn") pod "34240012-1d2c-41d0-8e7c-70b4d4e260a8" (UID: "34240012-1d2c-41d0-8e7c-70b4d4e260a8"). InnerVolumeSpecName "kube-api-access-xcsxn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:37:35 crc kubenswrapper[4687]: I1125 09:37:35.930996 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34240012-1d2c-41d0-8e7c-70b4d4e260a8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "34240012-1d2c-41d0-8e7c-70b4d4e260a8" (UID: "34240012-1d2c-41d0-8e7c-70b4d4e260a8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:37:35 crc kubenswrapper[4687]: I1125 09:37:35.932176 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34240012-1d2c-41d0-8e7c-70b4d4e260a8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:37:35 crc kubenswrapper[4687]: I1125 09:37:35.932201 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcsxn\" (UniqueName: \"kubernetes.io/projected/34240012-1d2c-41d0-8e7c-70b4d4e260a8-kube-api-access-xcsxn\") on node \"crc\" DevicePath \"\"" Nov 25 09:37:35 crc kubenswrapper[4687]: I1125 09:37:35.932214 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34240012-1d2c-41d0-8e7c-70b4d4e260a8-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:37:36 crc kubenswrapper[4687]: I1125 09:37:36.623797 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xcdmw" Nov 25 09:37:36 crc kubenswrapper[4687]: I1125 09:37:36.695247 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xcdmw"] Nov 25 09:37:36 crc kubenswrapper[4687]: I1125 09:37:36.710721 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xcdmw"] Nov 25 09:37:37 crc kubenswrapper[4687]: I1125 09:37:37.748155 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34240012-1d2c-41d0-8e7c-70b4d4e260a8" path="/var/lib/kubelet/pods/34240012-1d2c-41d0-8e7c-70b4d4e260a8/volumes" Nov 25 09:37:38 crc kubenswrapper[4687]: I1125 09:37:38.661190 4687 generic.go:334] "Generic (PLEG): container finished" podID="906f3fa0-c7e9-40dc-a876-0e0c9cdbc272" containerID="a86fd0be0688dd274b3c658aa3df611e69400280d01265350b26a556f03fd95a" exitCode=0 Nov 25 09:37:38 crc kubenswrapper[4687]: I1125 09:37:38.661325 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr" event={"ID":"906f3fa0-c7e9-40dc-a876-0e0c9cdbc272","Type":"ContainerDied","Data":"a86fd0be0688dd274b3c658aa3df611e69400280d01265350b26a556f03fd95a"} Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.095453 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.222238 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/906f3fa0-c7e9-40dc-a876-0e0c9cdbc272-ssh-key\") pod \"906f3fa0-c7e9-40dc-a876-0e0c9cdbc272\" (UID: \"906f3fa0-c7e9-40dc-a876-0e0c9cdbc272\") " Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.222457 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/906f3fa0-c7e9-40dc-a876-0e0c9cdbc272-inventory\") pod \"906f3fa0-c7e9-40dc-a876-0e0c9cdbc272\" (UID: \"906f3fa0-c7e9-40dc-a876-0e0c9cdbc272\") " Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.222587 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bx99z\" (UniqueName: \"kubernetes.io/projected/906f3fa0-c7e9-40dc-a876-0e0c9cdbc272-kube-api-access-bx99z\") pod \"906f3fa0-c7e9-40dc-a876-0e0c9cdbc272\" (UID: \"906f3fa0-c7e9-40dc-a876-0e0c9cdbc272\") " Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.230823 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/906f3fa0-c7e9-40dc-a876-0e0c9cdbc272-kube-api-access-bx99z" (OuterVolumeSpecName: "kube-api-access-bx99z") pod "906f3fa0-c7e9-40dc-a876-0e0c9cdbc272" (UID: "906f3fa0-c7e9-40dc-a876-0e0c9cdbc272"). InnerVolumeSpecName "kube-api-access-bx99z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.252748 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/906f3fa0-c7e9-40dc-a876-0e0c9cdbc272-inventory" (OuterVolumeSpecName: "inventory") pod "906f3fa0-c7e9-40dc-a876-0e0c9cdbc272" (UID: "906f3fa0-c7e9-40dc-a876-0e0c9cdbc272"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.252874 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/906f3fa0-c7e9-40dc-a876-0e0c9cdbc272-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "906f3fa0-c7e9-40dc-a876-0e0c9cdbc272" (UID: "906f3fa0-c7e9-40dc-a876-0e0c9cdbc272"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.324950 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bx99z\" (UniqueName: \"kubernetes.io/projected/906f3fa0-c7e9-40dc-a876-0e0c9cdbc272-kube-api-access-bx99z\") on node \"crc\" DevicePath \"\"" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.324999 4687 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/906f3fa0-c7e9-40dc-a876-0e0c9cdbc272-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.325014 4687 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/906f3fa0-c7e9-40dc-a876-0e0c9cdbc272-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.684994 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr" event={"ID":"906f3fa0-c7e9-40dc-a876-0e0c9cdbc272","Type":"ContainerDied","Data":"8d7fb3a1a75faa238bac7eee5aabadf5d0eea9855fd1f2517f579d7e0510fae8"} Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.685285 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8d7fb3a1a75faa238bac7eee5aabadf5d0eea9855fd1f2517f579d7e0510fae8" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.685880 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.768540 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-c42m9"] Nov 25 09:37:40 crc kubenswrapper[4687]: E1125 09:37:40.769018 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="954c615c-25f5-48ff-96a0-95dd985e135c" containerName="registry-server" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.769040 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="954c615c-25f5-48ff-96a0-95dd985e135c" containerName="registry-server" Nov 25 09:37:40 crc kubenswrapper[4687]: E1125 09:37:40.769063 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34240012-1d2c-41d0-8e7c-70b4d4e260a8" containerName="extract-content" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.769072 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="34240012-1d2c-41d0-8e7c-70b4d4e260a8" containerName="extract-content" Nov 25 09:37:40 crc kubenswrapper[4687]: E1125 09:37:40.769101 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="906f3fa0-c7e9-40dc-a876-0e0c9cdbc272" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.769111 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="906f3fa0-c7e9-40dc-a876-0e0c9cdbc272" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 09:37:40 crc kubenswrapper[4687]: E1125 09:37:40.769136 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34240012-1d2c-41d0-8e7c-70b4d4e260a8" containerName="registry-server" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.769145 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="34240012-1d2c-41d0-8e7c-70b4d4e260a8" containerName="registry-server" Nov 25 09:37:40 crc kubenswrapper[4687]: E1125 09:37:40.769160 4687 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="954c615c-25f5-48ff-96a0-95dd985e135c" containerName="extract-utilities" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.769169 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="954c615c-25f5-48ff-96a0-95dd985e135c" containerName="extract-utilities" Nov 25 09:37:40 crc kubenswrapper[4687]: E1125 09:37:40.769178 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34240012-1d2c-41d0-8e7c-70b4d4e260a8" containerName="extract-utilities" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.769215 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="34240012-1d2c-41d0-8e7c-70b4d4e260a8" containerName="extract-utilities" Nov 25 09:37:40 crc kubenswrapper[4687]: E1125 09:37:40.769226 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="954c615c-25f5-48ff-96a0-95dd985e135c" containerName="extract-content" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.769233 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="954c615c-25f5-48ff-96a0-95dd985e135c" containerName="extract-content" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.769463 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="954c615c-25f5-48ff-96a0-95dd985e135c" containerName="registry-server" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.769490 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="34240012-1d2c-41d0-8e7c-70b4d4e260a8" containerName="registry-server" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.769523 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="906f3fa0-c7e9-40dc-a876-0e0c9cdbc272" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.770321 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-c42m9" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.772885 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.772962 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-44nct" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.776086 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-c42m9"] Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.776657 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.777234 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.936700 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8kbt\" (UniqueName: \"kubernetes.io/projected/a2095b70-3311-4f0a-a052-5c8f686fd304-kube-api-access-j8kbt\") pod \"ssh-known-hosts-edpm-deployment-c42m9\" (UID: \"a2095b70-3311-4f0a-a052-5c8f686fd304\") " pod="openstack/ssh-known-hosts-edpm-deployment-c42m9" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.936797 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/a2095b70-3311-4f0a-a052-5c8f686fd304-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-c42m9\" (UID: \"a2095b70-3311-4f0a-a052-5c8f686fd304\") " pod="openstack/ssh-known-hosts-edpm-deployment-c42m9" Nov 25 09:37:40 crc kubenswrapper[4687]: I1125 09:37:40.936959 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a2095b70-3311-4f0a-a052-5c8f686fd304-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-c42m9\" (UID: \"a2095b70-3311-4f0a-a052-5c8f686fd304\") " pod="openstack/ssh-known-hosts-edpm-deployment-c42m9" Nov 25 09:37:41 crc kubenswrapper[4687]: I1125 09:37:41.038980 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8kbt\" (UniqueName: \"kubernetes.io/projected/a2095b70-3311-4f0a-a052-5c8f686fd304-kube-api-access-j8kbt\") pod \"ssh-known-hosts-edpm-deployment-c42m9\" (UID: \"a2095b70-3311-4f0a-a052-5c8f686fd304\") " pod="openstack/ssh-known-hosts-edpm-deployment-c42m9" Nov 25 09:37:41 crc kubenswrapper[4687]: I1125 09:37:41.039102 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/a2095b70-3311-4f0a-a052-5c8f686fd304-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-c42m9\" (UID: \"a2095b70-3311-4f0a-a052-5c8f686fd304\") " pod="openstack/ssh-known-hosts-edpm-deployment-c42m9" Nov 25 09:37:41 crc kubenswrapper[4687]: I1125 09:37:41.039241 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a2095b70-3311-4f0a-a052-5c8f686fd304-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-c42m9\" (UID: \"a2095b70-3311-4f0a-a052-5c8f686fd304\") " pod="openstack/ssh-known-hosts-edpm-deployment-c42m9" Nov 25 09:37:41 crc 
kubenswrapper[4687]: I1125 09:37:41.044408 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a2095b70-3311-4f0a-a052-5c8f686fd304-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-c42m9\" (UID: \"a2095b70-3311-4f0a-a052-5c8f686fd304\") " pod="openstack/ssh-known-hosts-edpm-deployment-c42m9" Nov 25 09:37:41 crc kubenswrapper[4687]: I1125 09:37:41.047888 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/a2095b70-3311-4f0a-a052-5c8f686fd304-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-c42m9\" (UID: \"a2095b70-3311-4f0a-a052-5c8f686fd304\") " pod="openstack/ssh-known-hosts-edpm-deployment-c42m9" Nov 25 09:37:41 crc kubenswrapper[4687]: I1125 09:37:41.061681 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8kbt\" (UniqueName: \"kubernetes.io/projected/a2095b70-3311-4f0a-a052-5c8f686fd304-kube-api-access-j8kbt\") pod \"ssh-known-hosts-edpm-deployment-c42m9\" (UID: \"a2095b70-3311-4f0a-a052-5c8f686fd304\") " pod="openstack/ssh-known-hosts-edpm-deployment-c42m9" Nov 25 09:37:41 crc kubenswrapper[4687]: I1125 09:37:41.091904 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-c42m9" Nov 25 09:37:41 crc kubenswrapper[4687]: I1125 09:37:41.604276 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-c42m9"] Nov 25 09:37:41 crc kubenswrapper[4687]: W1125 09:37:41.609060 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2095b70_3311_4f0a_a052_5c8f686fd304.slice/crio-1717727b108b6808abd76bf75a13af0669a5e0c85bea015b544678b01213938a WatchSource:0}: Error finding container 1717727b108b6808abd76bf75a13af0669a5e0c85bea015b544678b01213938a: Status 404 returned error can't find the container with id 1717727b108b6808abd76bf75a13af0669a5e0c85bea015b544678b01213938a Nov 25 09:37:41 crc kubenswrapper[4687]: I1125 09:37:41.613081 4687 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:37:41 crc kubenswrapper[4687]: I1125 09:37:41.695073 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-c42m9" event={"ID":"a2095b70-3311-4f0a-a052-5c8f686fd304","Type":"ContainerStarted","Data":"1717727b108b6808abd76bf75a13af0669a5e0c85bea015b544678b01213938a"} Nov 25 09:37:43 crc kubenswrapper[4687]: I1125 09:37:43.729962 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-c42m9" event={"ID":"a2095b70-3311-4f0a-a052-5c8f686fd304","Type":"ContainerStarted","Data":"0389f928a11978d20b9a45cab335dd6bc254127561a06f01f2dc8f70cc09a1a9"} Nov 25 09:37:43 crc kubenswrapper[4687]: I1125 09:37:43.756862 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-c42m9" podStartSLOduration=2.7191491660000002 podStartE2EDuration="3.756838218s" podCreationTimestamp="2025-11-25 09:37:40 +0000 UTC" firstStartedPulling="2025-11-25 09:37:41.612904011 +0000 UTC m=+2056.666543729" lastFinishedPulling="2025-11-25 09:37:42.650593023 +0000 UTC m=+2057.704232781" observedRunningTime="2025-11-25 09:37:43.747185686 +0000 UTC m=+2058.800825424" watchObservedRunningTime="2025-11-25 09:37:43.756838218 +0000 UTC 
m=+2058.810477936" Nov 25 09:37:49 crc kubenswrapper[4687]: I1125 09:37:49.781092 4687 generic.go:334] "Generic (PLEG): container finished" podID="a2095b70-3311-4f0a-a052-5c8f686fd304" containerID="0389f928a11978d20b9a45cab335dd6bc254127561a06f01f2dc8f70cc09a1a9" exitCode=0 Nov 25 09:37:49 crc kubenswrapper[4687]: I1125 09:37:49.781153 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-c42m9" event={"ID":"a2095b70-3311-4f0a-a052-5c8f686fd304","Type":"ContainerDied","Data":"0389f928a11978d20b9a45cab335dd6bc254127561a06f01f2dc8f70cc09a1a9"} Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.280362 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-c42m9" Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.438678 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j8kbt\" (UniqueName: \"kubernetes.io/projected/a2095b70-3311-4f0a-a052-5c8f686fd304-kube-api-access-j8kbt\") pod \"a2095b70-3311-4f0a-a052-5c8f686fd304\" (UID: \"a2095b70-3311-4f0a-a052-5c8f686fd304\") " Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.438782 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a2095b70-3311-4f0a-a052-5c8f686fd304-ssh-key-openstack-edpm-ipam\") pod \"a2095b70-3311-4f0a-a052-5c8f686fd304\" (UID: \"a2095b70-3311-4f0a-a052-5c8f686fd304\") " Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.438852 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/a2095b70-3311-4f0a-a052-5c8f686fd304-inventory-0\") pod \"a2095b70-3311-4f0a-a052-5c8f686fd304\" (UID: \"a2095b70-3311-4f0a-a052-5c8f686fd304\") " Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.462861 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2095b70-3311-4f0a-a052-5c8f686fd304-kube-api-access-j8kbt" (OuterVolumeSpecName: "kube-api-access-j8kbt") pod "a2095b70-3311-4f0a-a052-5c8f686fd304" (UID: "a2095b70-3311-4f0a-a052-5c8f686fd304"). InnerVolumeSpecName "kube-api-access-j8kbt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.471188 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2095b70-3311-4f0a-a052-5c8f686fd304-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "a2095b70-3311-4f0a-a052-5c8f686fd304" (UID: "a2095b70-3311-4f0a-a052-5c8f686fd304"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.482835 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2095b70-3311-4f0a-a052-5c8f686fd304-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "a2095b70-3311-4f0a-a052-5c8f686fd304" (UID: "a2095b70-3311-4f0a-a052-5c8f686fd304"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.541385 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j8kbt\" (UniqueName: \"kubernetes.io/projected/a2095b70-3311-4f0a-a052-5c8f686fd304-kube-api-access-j8kbt\") on node \"crc\" DevicePath \"\"" Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.541446 4687 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a2095b70-3311-4f0a-a052-5c8f686fd304-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.541476 4687 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/a2095b70-3311-4f0a-a052-5c8f686fd304-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.799881 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-c42m9" event={"ID":"a2095b70-3311-4f0a-a052-5c8f686fd304","Type":"ContainerDied","Data":"1717727b108b6808abd76bf75a13af0669a5e0c85bea015b544678b01213938a"} Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.799923 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1717727b108b6808abd76bf75a13af0669a5e0c85bea015b544678b01213938a" Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.799973 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-c42m9" Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.887244 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-hcd94"] Nov 25 09:37:51 crc kubenswrapper[4687]: E1125 09:37:51.887724 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2095b70-3311-4f0a-a052-5c8f686fd304" containerName="ssh-known-hosts-edpm-deployment" Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.887746 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2095b70-3311-4f0a-a052-5c8f686fd304" containerName="ssh-known-hosts-edpm-deployment" Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.888019 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2095b70-3311-4f0a-a052-5c8f686fd304" containerName="ssh-known-hosts-edpm-deployment" Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.888740 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hcd94" Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.894204 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.894463 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.894599 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-44nct" Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.894663 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 09:37:51 crc kubenswrapper[4687]: I1125 09:37:51.896384 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-hcd94"] Nov 25 09:37:52 crc kubenswrapper[4687]: I1125 09:37:52.052339 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2lvqv\" (UniqueName: \"kubernetes.io/projected/84e0ed5c-dd14-40ef-bc65-5066ae662f34-kube-api-access-2lvqv\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-hcd94\" (UID: \"84e0ed5c-dd14-40ef-bc65-5066ae662f34\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hcd94" Nov 25 09:37:52 crc kubenswrapper[4687]: I1125 09:37:52.052490 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84e0ed5c-dd14-40ef-bc65-5066ae662f34-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-hcd94\" (UID: \"84e0ed5c-dd14-40ef-bc65-5066ae662f34\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hcd94" Nov 25 09:37:52 crc kubenswrapper[4687]: I1125 09:37:52.052590 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84e0ed5c-dd14-40ef-bc65-5066ae662f34-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-hcd94\" (UID: \"84e0ed5c-dd14-40ef-bc65-5066ae662f34\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hcd94" Nov 25 09:37:52 crc kubenswrapper[4687]: I1125 09:37:52.154229 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84e0ed5c-dd14-40ef-bc65-5066ae662f34-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-hcd94\" (UID: \"84e0ed5c-dd14-40ef-bc65-5066ae662f34\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hcd94" Nov 25 09:37:52 crc kubenswrapper[4687]: I1125 09:37:52.154381 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84e0ed5c-dd14-40ef-bc65-5066ae662f34-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-hcd94\" (UID: \"84e0ed5c-dd14-40ef-bc65-5066ae662f34\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hcd94" Nov 25 09:37:52 crc kubenswrapper[4687]: I1125 09:37:52.154572 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2lvqv\" (UniqueName: \"kubernetes.io/projected/84e0ed5c-dd14-40ef-bc65-5066ae662f34-kube-api-access-2lvqv\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-hcd94\" (UID: \"84e0ed5c-dd14-40ef-bc65-5066ae662f34\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hcd94" Nov 25 09:37:52 crc kubenswrapper[4687]: I1125 09:37:52.159480 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84e0ed5c-dd14-40ef-bc65-5066ae662f34-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-hcd94\" (UID: \"84e0ed5c-dd14-40ef-bc65-5066ae662f34\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hcd94" Nov 25 09:37:52 crc kubenswrapper[4687]: I1125 09:37:52.169282 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84e0ed5c-dd14-40ef-bc65-5066ae662f34-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-hcd94\" (UID: \"84e0ed5c-dd14-40ef-bc65-5066ae662f34\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hcd94" Nov 25 09:37:52 crc kubenswrapper[4687]: I1125 09:37:52.172092 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2lvqv\" (UniqueName: \"kubernetes.io/projected/84e0ed5c-dd14-40ef-bc65-5066ae662f34-kube-api-access-2lvqv\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-hcd94\" (UID: \"84e0ed5c-dd14-40ef-bc65-5066ae662f34\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hcd94" Nov 25 09:37:52 crc kubenswrapper[4687]: I1125 09:37:52.211408 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hcd94" Nov 25 09:37:52 crc kubenswrapper[4687]: I1125 09:37:52.538935 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-hcd94"] Nov 25 09:37:52 crc kubenswrapper[4687]: I1125 09:37:52.813470 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hcd94" event={"ID":"84e0ed5c-dd14-40ef-bc65-5066ae662f34","Type":"ContainerStarted","Data":"a0c2bc4e49091ad9c4e350c29659cb0f29c70b45812f400d77374ee4c8fa1b39"} Nov 25 09:37:53 crc kubenswrapper[4687]: I1125 09:37:53.821634 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hcd94" event={"ID":"84e0ed5c-dd14-40ef-bc65-5066ae662f34","Type":"ContainerStarted","Data":"eac76612e4ae9656093c8442876e70878bfb57841686f78e0fdb82c5a210980f"} Nov 25 09:37:53 crc kubenswrapper[4687]: I1125 09:37:53.837905 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hcd94" podStartSLOduration=2.03800332 podStartE2EDuration="2.83789083s" podCreationTimestamp="2025-11-25 09:37:51 +0000 UTC" firstStartedPulling="2025-11-25 09:37:52.54789015 +0000 UTC m=+2067.601529868" lastFinishedPulling="2025-11-25 09:37:53.34777765 +0000 UTC m=+2068.401417378" observedRunningTime="2025-11-25 09:37:53.837447837 +0000 UTC m=+2068.891087575" watchObservedRunningTime="2025-11-25 09:37:53.83789083 +0000 UTC m=+2068.891530548" Nov 25 09:38:01 crc kubenswrapper[4687]: I1125 09:38:01.887054 4687 generic.go:334] "Generic (PLEG): container finished" podID="84e0ed5c-dd14-40ef-bc65-5066ae662f34" containerID="eac76612e4ae9656093c8442876e70878bfb57841686f78e0fdb82c5a210980f" exitCode=0 Nov 25 09:38:01 crc kubenswrapper[4687]: I1125 09:38:01.887141 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hcd94" 
event={"ID":"84e0ed5c-dd14-40ef-bc65-5066ae662f34","Type":"ContainerDied","Data":"eac76612e4ae9656093c8442876e70878bfb57841686f78e0fdb82c5a210980f"} Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.298466 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hcd94" Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.465140 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84e0ed5c-dd14-40ef-bc65-5066ae662f34-inventory\") pod \"84e0ed5c-dd14-40ef-bc65-5066ae662f34\" (UID: \"84e0ed5c-dd14-40ef-bc65-5066ae662f34\") " Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.465274 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2lvqv\" (UniqueName: \"kubernetes.io/projected/84e0ed5c-dd14-40ef-bc65-5066ae662f34-kube-api-access-2lvqv\") pod \"84e0ed5c-dd14-40ef-bc65-5066ae662f34\" (UID: \"84e0ed5c-dd14-40ef-bc65-5066ae662f34\") " Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.465311 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84e0ed5c-dd14-40ef-bc65-5066ae662f34-ssh-key\") pod \"84e0ed5c-dd14-40ef-bc65-5066ae662f34\" (UID: \"84e0ed5c-dd14-40ef-bc65-5066ae662f34\") " Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.471853 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84e0ed5c-dd14-40ef-bc65-5066ae662f34-kube-api-access-2lvqv" (OuterVolumeSpecName: "kube-api-access-2lvqv") pod "84e0ed5c-dd14-40ef-bc65-5066ae662f34" (UID: "84e0ed5c-dd14-40ef-bc65-5066ae662f34"). InnerVolumeSpecName "kube-api-access-2lvqv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.497722 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84e0ed5c-dd14-40ef-bc65-5066ae662f34-inventory" (OuterVolumeSpecName: "inventory") pod "84e0ed5c-dd14-40ef-bc65-5066ae662f34" (UID: "84e0ed5c-dd14-40ef-bc65-5066ae662f34"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.497772 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84e0ed5c-dd14-40ef-bc65-5066ae662f34-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "84e0ed5c-dd14-40ef-bc65-5066ae662f34" (UID: "84e0ed5c-dd14-40ef-bc65-5066ae662f34"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.567983 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2lvqv\" (UniqueName: \"kubernetes.io/projected/84e0ed5c-dd14-40ef-bc65-5066ae662f34-kube-api-access-2lvqv\") on node \"crc\" DevicePath \"\"" Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.568026 4687 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84e0ed5c-dd14-40ef-bc65-5066ae662f34-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.568039 4687 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84e0ed5c-dd14-40ef-bc65-5066ae662f34-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.907144 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hcd94" event={"ID":"84e0ed5c-dd14-40ef-bc65-5066ae662f34","Type":"ContainerDied","Data":"a0c2bc4e49091ad9c4e350c29659cb0f29c70b45812f400d77374ee4c8fa1b39"} Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.907449 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a0c2bc4e49091ad9c4e350c29659cb0f29c70b45812f400d77374ee4c8fa1b39" Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.907573 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hcd94" Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.973847 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68"] Nov 25 09:38:03 crc kubenswrapper[4687]: E1125 09:38:03.974532 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84e0ed5c-dd14-40ef-bc65-5066ae662f34" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.974558 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="84e0ed5c-dd14-40ef-bc65-5066ae662f34" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.974856 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="84e0ed5c-dd14-40ef-bc65-5066ae662f34" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.975749 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68" Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.978558 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.978607 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.978636 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-44nct" Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.978981 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 09:38:03 crc kubenswrapper[4687]: I1125 09:38:03.984647 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68"] Nov 25 09:38:04 crc kubenswrapper[4687]: I1125 09:38:04.080011 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9896502d-3c95-47e0-b75a-855221a19ebc-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68\" (UID: \"9896502d-3c95-47e0-b75a-855221a19ebc\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68" Nov 25 09:38:04 crc kubenswrapper[4687]: I1125 09:38:04.080083 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgvck\" (UniqueName: \"kubernetes.io/projected/9896502d-3c95-47e0-b75a-855221a19ebc-kube-api-access-hgvck\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68\" (UID: \"9896502d-3c95-47e0-b75a-855221a19ebc\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68" Nov 25 09:38:04 crc kubenswrapper[4687]: I1125 09:38:04.080172 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9896502d-3c95-47e0-b75a-855221a19ebc-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68\" (UID: \"9896502d-3c95-47e0-b75a-855221a19ebc\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68" Nov 25 09:38:04 crc kubenswrapper[4687]: I1125 09:38:04.181756 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9896502d-3c95-47e0-b75a-855221a19ebc-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68\" (UID: \"9896502d-3c95-47e0-b75a-855221a19ebc\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68" Nov 25 09:38:04 crc kubenswrapper[4687]: I1125 09:38:04.181843 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgvck\" (UniqueName: \"kubernetes.io/projected/9896502d-3c95-47e0-b75a-855221a19ebc-kube-api-access-hgvck\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68\" (UID: \"9896502d-3c95-47e0-b75a-855221a19ebc\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68" Nov 25 09:38:04 crc kubenswrapper[4687]: I1125 09:38:04.181943 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9896502d-3c95-47e0-b75a-855221a19ebc-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68\" (UID: 
\"9896502d-3c95-47e0-b75a-855221a19ebc\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68" Nov 25 09:38:04 crc kubenswrapper[4687]: I1125 09:38:04.186378 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9896502d-3c95-47e0-b75a-855221a19ebc-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68\" (UID: \"9896502d-3c95-47e0-b75a-855221a19ebc\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68" Nov 25 09:38:04 crc kubenswrapper[4687]: I1125 09:38:04.195205 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9896502d-3c95-47e0-b75a-855221a19ebc-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68\" (UID: \"9896502d-3c95-47e0-b75a-855221a19ebc\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68" Nov 25 09:38:04 crc kubenswrapper[4687]: I1125 09:38:04.205968 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hgvck\" (UniqueName: \"kubernetes.io/projected/9896502d-3c95-47e0-b75a-855221a19ebc-kube-api-access-hgvck\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68\" (UID: \"9896502d-3c95-47e0-b75a-855221a19ebc\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68" Nov 25 09:38:04 crc kubenswrapper[4687]: I1125 09:38:04.305489 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68" Nov 25 09:38:04 crc kubenswrapper[4687]: I1125 09:38:04.809014 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68"] Nov 25 09:38:04 crc kubenswrapper[4687]: W1125 09:38:04.815334 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9896502d_3c95_47e0_b75a_855221a19ebc.slice/crio-e93c74e70f1cdbf0e277b594ee7f49bb954fa2fc1fd3ddabe9d3231ca3f8df06 WatchSource:0}: Error finding container e93c74e70f1cdbf0e277b594ee7f49bb954fa2fc1fd3ddabe9d3231ca3f8df06: Status 404 returned error can't find the container with id e93c74e70f1cdbf0e277b594ee7f49bb954fa2fc1fd3ddabe9d3231ca3f8df06 Nov 25 09:38:04 crc kubenswrapper[4687]: I1125 09:38:04.916427 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68" event={"ID":"9896502d-3c95-47e0-b75a-855221a19ebc","Type":"ContainerStarted","Data":"e93c74e70f1cdbf0e277b594ee7f49bb954fa2fc1fd3ddabe9d3231ca3f8df06"} Nov 25 09:38:05 crc kubenswrapper[4687]: I1125 09:38:05.927775 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68" event={"ID":"9896502d-3c95-47e0-b75a-855221a19ebc","Type":"ContainerStarted","Data":"77aef01320c04a758ecac96a5c3e7587d5f2fcac60a8dbb50046fafca35283b3"} Nov 25 09:38:05 crc kubenswrapper[4687]: I1125 09:38:05.952090 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68" podStartSLOduration=2.380343269 podStartE2EDuration="2.952068297s" podCreationTimestamp="2025-11-25 09:38:03 +0000 UTC" firstStartedPulling="2025-11-25 09:38:04.818254752 +0000 UTC m=+2079.871894470" lastFinishedPulling="2025-11-25 09:38:05.38997978 +0000 UTC m=+2080.443619498" observedRunningTime="2025-11-25 09:38:05.947683118 +0000 UTC m=+2081.001322836" 
watchObservedRunningTime="2025-11-25 09:38:05.952068297 +0000 UTC m=+2081.005708035" Nov 25 09:38:16 crc kubenswrapper[4687]: I1125 09:38:16.029620 4687 generic.go:334] "Generic (PLEG): container finished" podID="9896502d-3c95-47e0-b75a-855221a19ebc" containerID="77aef01320c04a758ecac96a5c3e7587d5f2fcac60a8dbb50046fafca35283b3" exitCode=0 Nov 25 09:38:16 crc kubenswrapper[4687]: I1125 09:38:16.029698 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68" event={"ID":"9896502d-3c95-47e0-b75a-855221a19ebc","Type":"ContainerDied","Data":"77aef01320c04a758ecac96a5c3e7587d5f2fcac60a8dbb50046fafca35283b3"} Nov 25 09:38:17 crc kubenswrapper[4687]: I1125 09:38:17.446283 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68" Nov 25 09:38:17 crc kubenswrapper[4687]: I1125 09:38:17.540808 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9896502d-3c95-47e0-b75a-855221a19ebc-inventory\") pod \"9896502d-3c95-47e0-b75a-855221a19ebc\" (UID: \"9896502d-3c95-47e0-b75a-855221a19ebc\") " Nov 25 09:38:17 crc kubenswrapper[4687]: I1125 09:38:17.541252 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9896502d-3c95-47e0-b75a-855221a19ebc-ssh-key\") pod \"9896502d-3c95-47e0-b75a-855221a19ebc\" (UID: \"9896502d-3c95-47e0-b75a-855221a19ebc\") " Nov 25 09:38:17 crc kubenswrapper[4687]: I1125 09:38:17.541393 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hgvck\" (UniqueName: \"kubernetes.io/projected/9896502d-3c95-47e0-b75a-855221a19ebc-kube-api-access-hgvck\") pod \"9896502d-3c95-47e0-b75a-855221a19ebc\" (UID: \"9896502d-3c95-47e0-b75a-855221a19ebc\") " Nov 25 09:38:17 crc kubenswrapper[4687]: I1125 09:38:17.546705 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9896502d-3c95-47e0-b75a-855221a19ebc-kube-api-access-hgvck" (OuterVolumeSpecName: "kube-api-access-hgvck") pod "9896502d-3c95-47e0-b75a-855221a19ebc" (UID: "9896502d-3c95-47e0-b75a-855221a19ebc"). InnerVolumeSpecName "kube-api-access-hgvck". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:38:17 crc kubenswrapper[4687]: I1125 09:38:17.568731 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9896502d-3c95-47e0-b75a-855221a19ebc-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9896502d-3c95-47e0-b75a-855221a19ebc" (UID: "9896502d-3c95-47e0-b75a-855221a19ebc"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:38:17 crc kubenswrapper[4687]: I1125 09:38:17.570387 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9896502d-3c95-47e0-b75a-855221a19ebc-inventory" (OuterVolumeSpecName: "inventory") pod "9896502d-3c95-47e0-b75a-855221a19ebc" (UID: "9896502d-3c95-47e0-b75a-855221a19ebc"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:38:17 crc kubenswrapper[4687]: I1125 09:38:17.644240 4687 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9896502d-3c95-47e0-b75a-855221a19ebc-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 09:38:17 crc kubenswrapper[4687]: I1125 09:38:17.644276 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hgvck\" (UniqueName: \"kubernetes.io/projected/9896502d-3c95-47e0-b75a-855221a19ebc-kube-api-access-hgvck\") on node \"crc\" DevicePath \"\"" Nov 25 09:38:17 crc kubenswrapper[4687]: I1125 09:38:17.644292 4687 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9896502d-3c95-47e0-b75a-855221a19ebc-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.054378 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68" event={"ID":"9896502d-3c95-47e0-b75a-855221a19ebc","Type":"ContainerDied","Data":"e93c74e70f1cdbf0e277b594ee7f49bb954fa2fc1fd3ddabe9d3231ca3f8df06"} Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.054743 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e93c74e70f1cdbf0e277b594ee7f49bb954fa2fc1fd3ddabe9d3231ca3f8df06" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.054823 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.139141 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd"] Nov 25 09:38:18 crc kubenswrapper[4687]: E1125 09:38:18.139819 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9896502d-3c95-47e0-b75a-855221a19ebc" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.139940 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="9896502d-3c95-47e0-b75a-855221a19ebc" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.140219 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="9896502d-3c95-47e0-b75a-855221a19ebc" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.142299 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.146634 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.146785 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-44nct" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.147466 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.147474 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.147687 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.147837 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.147947 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.148316 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.161842 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd"] Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.255997 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.256077 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.256134 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.256309 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qr4tq\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-kube-api-access-qr4tq\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: 
\"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.256465 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.256591 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.256674 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.256771 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.256838 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.256958 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.257156 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.257414 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.257489 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.257565 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.359350 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.359401 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.360307 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.360363 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.360415 4687 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.360452 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.360492 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qr4tq\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-kube-api-access-qr4tq\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.360531 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.360551 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.360581 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.360615 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.360642 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.360669 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.360724 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.366728 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.366907 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.367034 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.367407 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.367695 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.368171 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.368363 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.368537 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.368734 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.370080 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.370930 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.372348 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.374022 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.397178 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qr4tq\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-kube-api-access-qr4tq\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:18 crc kubenswrapper[4687]: I1125 09:38:18.471988 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:38:19 crc kubenswrapper[4687]: I1125 09:38:19.033706 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd"] Nov 25 09:38:19 crc kubenswrapper[4687]: I1125 09:38:19.065543 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" event={"ID":"04071c43-9814-4c88-bd7e-f3b1c83f9dfc","Type":"ContainerStarted","Data":"e5a8223633fd991c9955fa7cb4643fef8ea2888e84733388058dd295cd37e722"} Nov 25 09:38:21 crc kubenswrapper[4687]: I1125 09:38:21.110875 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" event={"ID":"04071c43-9814-4c88-bd7e-f3b1c83f9dfc","Type":"ContainerStarted","Data":"2fdcc1b7c277d438def2e7672413bf0989ab9405c22ab6f7f405db3e514032ee"} Nov 25 09:38:21 crc kubenswrapper[4687]: I1125 09:38:21.153478 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" podStartSLOduration=2.14531667 podStartE2EDuration="3.153456779s" podCreationTimestamp="2025-11-25 09:38:18 +0000 UTC" firstStartedPulling="2025-11-25 09:38:19.039844475 +0000 UTC m=+2094.093484193" lastFinishedPulling="2025-11-25 09:38:20.047984584 +0000 UTC m=+2095.101624302" observedRunningTime="2025-11-25 09:38:21.135229483 +0000 UTC m=+2096.188869241" watchObservedRunningTime="2025-11-25 09:38:21.153456779 +0000 UTC m=+2096.207096497" Nov 25 09:38:53 crc kubenswrapper[4687]: I1125 09:38:53.844844 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:38:53 crc kubenswrapper[4687]: I1125 09:38:53.845388 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:38:59 crc kubenswrapper[4687]: I1125 09:38:59.438009 4687 generic.go:334] "Generic (PLEG): container finished" podID="04071c43-9814-4c88-bd7e-f3b1c83f9dfc" containerID="2fdcc1b7c277d438def2e7672413bf0989ab9405c22ab6f7f405db3e514032ee" exitCode=0 Nov 25 09:38:59 crc kubenswrapper[4687]: I1125 09:38:59.438120 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" 
event={"ID":"04071c43-9814-4c88-bd7e-f3b1c83f9dfc","Type":"ContainerDied","Data":"2fdcc1b7c277d438def2e7672413bf0989ab9405c22ab6f7f405db3e514032ee"} Nov 25 09:39:00 crc kubenswrapper[4687]: I1125 09:39:00.926673 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.102142 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-libvirt-combined-ca-bundle\") pod \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.102206 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-ssh-key\") pod \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.102228 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-ovn-combined-ca-bundle\") pod \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.102313 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-ovn-default-certs-0\") pod \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.102352 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.102373 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.102397 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-bootstrap-combined-ca-bundle\") pod \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.102443 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-neutron-metadata-combined-ca-bundle\") pod \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.102539 4687 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-nova-combined-ca-bundle\") pod \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.102566 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-telemetry-combined-ca-bundle\") pod \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.102585 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-inventory\") pod \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.102613 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.102647 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qr4tq\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-kube-api-access-qr4tq\") pod \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.102689 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-repo-setup-combined-ca-bundle\") pod \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\" (UID: \"04071c43-9814-4c88-bd7e-f3b1c83f9dfc\") " Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.108229 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "04071c43-9814-4c88-bd7e-f3b1c83f9dfc" (UID: "04071c43-9814-4c88-bd7e-f3b1c83f9dfc"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.108992 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "04071c43-9814-4c88-bd7e-f3b1c83f9dfc" (UID: "04071c43-9814-4c88-bd7e-f3b1c83f9dfc"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.109012 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "04071c43-9814-4c88-bd7e-f3b1c83f9dfc" (UID: "04071c43-9814-4c88-bd7e-f3b1c83f9dfc"). 
InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.110843 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "04071c43-9814-4c88-bd7e-f3b1c83f9dfc" (UID: "04071c43-9814-4c88-bd7e-f3b1c83f9dfc"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.111013 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "04071c43-9814-4c88-bd7e-f3b1c83f9dfc" (UID: "04071c43-9814-4c88-bd7e-f3b1c83f9dfc"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.111207 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "04071c43-9814-4c88-bd7e-f3b1c83f9dfc" (UID: "04071c43-9814-4c88-bd7e-f3b1c83f9dfc"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.112786 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "04071c43-9814-4c88-bd7e-f3b1c83f9dfc" (UID: "04071c43-9814-4c88-bd7e-f3b1c83f9dfc"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.113532 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "04071c43-9814-4c88-bd7e-f3b1c83f9dfc" (UID: "04071c43-9814-4c88-bd7e-f3b1c83f9dfc"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.114650 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "04071c43-9814-4c88-bd7e-f3b1c83f9dfc" (UID: "04071c43-9814-4c88-bd7e-f3b1c83f9dfc"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.114900 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "04071c43-9814-4c88-bd7e-f3b1c83f9dfc" (UID: "04071c43-9814-4c88-bd7e-f3b1c83f9dfc"). 
InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.115305 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "04071c43-9814-4c88-bd7e-f3b1c83f9dfc" (UID: "04071c43-9814-4c88-bd7e-f3b1c83f9dfc"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.116590 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-kube-api-access-qr4tq" (OuterVolumeSpecName: "kube-api-access-qr4tq") pod "04071c43-9814-4c88-bd7e-f3b1c83f9dfc" (UID: "04071c43-9814-4c88-bd7e-f3b1c83f9dfc"). InnerVolumeSpecName "kube-api-access-qr4tq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.141726 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "04071c43-9814-4c88-bd7e-f3b1c83f9dfc" (UID: "04071c43-9814-4c88-bd7e-f3b1c83f9dfc"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.143025 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-inventory" (OuterVolumeSpecName: "inventory") pod "04071c43-9814-4c88-bd7e-f3b1c83f9dfc" (UID: "04071c43-9814-4c88-bd7e-f3b1c83f9dfc"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.205187 4687 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.205228 4687 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.205314 4687 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.205332 4687 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.205346 4687 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.205359 4687 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.205372 4687 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.205384 4687 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.205396 4687 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.205406 4687 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.205422 4687 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.205433 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qr4tq\" (UniqueName: 
\"kubernetes.io/projected/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-kube-api-access-qr4tq\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.205445 4687 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.205457 4687 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04071c43-9814-4c88-bd7e-f3b1c83f9dfc-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.458277 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" event={"ID":"04071c43-9814-4c88-bd7e-f3b1c83f9dfc","Type":"ContainerDied","Data":"e5a8223633fd991c9955fa7cb4643fef8ea2888e84733388058dd295cd37e722"} Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.458694 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5a8223633fd991c9955fa7cb4643fef8ea2888e84733388058dd295cd37e722" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.458762 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.557367 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh"] Nov 25 09:39:01 crc kubenswrapper[4687]: E1125 09:39:01.557777 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04071c43-9814-4c88-bd7e-f3b1c83f9dfc" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.557796 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="04071c43-9814-4c88-bd7e-f3b1c83f9dfc" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.558002 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="04071c43-9814-4c88-bd7e-f3b1c83f9dfc" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.558638 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.568294 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.568329 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh"] Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.568745 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.569421 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.570988 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-44nct" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.575792 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.611033 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/db45ca10-ced6-46c4-84e8-ac525cd596b4-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wwsqh\" (UID: \"db45ca10-ced6-46c4-84e8-ac525cd596b4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.611094 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/db45ca10-ced6-46c4-84e8-ac525cd596b4-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wwsqh\" (UID: \"db45ca10-ced6-46c4-84e8-ac525cd596b4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.611198 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db45ca10-ced6-46c4-84e8-ac525cd596b4-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wwsqh\" (UID: \"db45ca10-ced6-46c4-84e8-ac525cd596b4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.611237 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/db45ca10-ced6-46c4-84e8-ac525cd596b4-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wwsqh\" (UID: \"db45ca10-ced6-46c4-84e8-ac525cd596b4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.611253 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m98xb\" (UniqueName: \"kubernetes.io/projected/db45ca10-ced6-46c4-84e8-ac525cd596b4-kube-api-access-m98xb\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wwsqh\" (UID: \"db45ca10-ced6-46c4-84e8-ac525cd596b4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.712391 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" 
(UniqueName: \"kubernetes.io/configmap/db45ca10-ced6-46c4-84e8-ac525cd596b4-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wwsqh\" (UID: \"db45ca10-ced6-46c4-84e8-ac525cd596b4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.712459 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/db45ca10-ced6-46c4-84e8-ac525cd596b4-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wwsqh\" (UID: \"db45ca10-ced6-46c4-84e8-ac525cd596b4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.712625 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db45ca10-ced6-46c4-84e8-ac525cd596b4-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wwsqh\" (UID: \"db45ca10-ced6-46c4-84e8-ac525cd596b4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.712838 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/db45ca10-ced6-46c4-84e8-ac525cd596b4-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wwsqh\" (UID: \"db45ca10-ced6-46c4-84e8-ac525cd596b4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.713415 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/db45ca10-ced6-46c4-84e8-ac525cd596b4-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wwsqh\" (UID: \"db45ca10-ced6-46c4-84e8-ac525cd596b4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.713445 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m98xb\" (UniqueName: \"kubernetes.io/projected/db45ca10-ced6-46c4-84e8-ac525cd596b4-kube-api-access-m98xb\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wwsqh\" (UID: \"db45ca10-ced6-46c4-84e8-ac525cd596b4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.717227 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/db45ca10-ced6-46c4-84e8-ac525cd596b4-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wwsqh\" (UID: \"db45ca10-ced6-46c4-84e8-ac525cd596b4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.717273 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db45ca10-ced6-46c4-84e8-ac525cd596b4-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wwsqh\" (UID: \"db45ca10-ced6-46c4-84e8-ac525cd596b4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.717317 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/db45ca10-ced6-46c4-84e8-ac525cd596b4-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wwsqh\" (UID: \"db45ca10-ced6-46c4-84e8-ac525cd596b4\") " 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.731916 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m98xb\" (UniqueName: \"kubernetes.io/projected/db45ca10-ced6-46c4-84e8-ac525cd596b4-kube-api-access-m98xb\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wwsqh\" (UID: \"db45ca10-ced6-46c4-84e8-ac525cd596b4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" Nov 25 09:39:01 crc kubenswrapper[4687]: I1125 09:39:01.879261 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" Nov 25 09:39:02 crc kubenswrapper[4687]: I1125 09:39:02.422424 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh"] Nov 25 09:39:02 crc kubenswrapper[4687]: I1125 09:39:02.470462 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" event={"ID":"db45ca10-ced6-46c4-84e8-ac525cd596b4","Type":"ContainerStarted","Data":"1bedca4c63665443721b8085d02fa8e021d85a8ac80b476e70ed714b0099dd77"} Nov 25 09:39:04 crc kubenswrapper[4687]: I1125 09:39:04.492149 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" event={"ID":"db45ca10-ced6-46c4-84e8-ac525cd596b4","Type":"ContainerStarted","Data":"4afa53688d2ba82be225e61667598bcc439fc85492420940e4c89e1b00ca648b"} Nov 25 09:39:04 crc kubenswrapper[4687]: I1125 09:39:04.530784 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" podStartSLOduration=2.6913911390000003 podStartE2EDuration="3.530765801s" podCreationTimestamp="2025-11-25 09:39:01 +0000 UTC" firstStartedPulling="2025-11-25 09:39:02.433821171 +0000 UTC m=+2137.487460889" lastFinishedPulling="2025-11-25 09:39:03.273195833 +0000 UTC m=+2138.326835551" observedRunningTime="2025-11-25 09:39:04.528210912 +0000 UTC m=+2139.581850640" watchObservedRunningTime="2025-11-25 09:39:04.530765801 +0000 UTC m=+2139.584405529" Nov 25 09:39:20 crc kubenswrapper[4687]: I1125 09:39:20.896832 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qm545"] Nov 25 09:39:20 crc kubenswrapper[4687]: I1125 09:39:20.900007 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qm545" Nov 25 09:39:20 crc kubenswrapper[4687]: I1125 09:39:20.915543 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qm545"] Nov 25 09:39:20 crc kubenswrapper[4687]: I1125 09:39:20.952166 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07a5c838-f04b-486a-a120-dece44ac206e-catalog-content\") pod \"redhat-marketplace-qm545\" (UID: \"07a5c838-f04b-486a-a120-dece44ac206e\") " pod="openshift-marketplace/redhat-marketplace-qm545" Nov 25 09:39:20 crc kubenswrapper[4687]: I1125 09:39:20.952477 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfwvq\" (UniqueName: \"kubernetes.io/projected/07a5c838-f04b-486a-a120-dece44ac206e-kube-api-access-vfwvq\") pod \"redhat-marketplace-qm545\" (UID: \"07a5c838-f04b-486a-a120-dece44ac206e\") " pod="openshift-marketplace/redhat-marketplace-qm545" Nov 25 09:39:20 crc kubenswrapper[4687]: I1125 09:39:20.952656 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07a5c838-f04b-486a-a120-dece44ac206e-utilities\") pod \"redhat-marketplace-qm545\" (UID: \"07a5c838-f04b-486a-a120-dece44ac206e\") " pod="openshift-marketplace/redhat-marketplace-qm545" Nov 25 09:39:21 crc kubenswrapper[4687]: I1125 09:39:21.054702 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07a5c838-f04b-486a-a120-dece44ac206e-utilities\") pod \"redhat-marketplace-qm545\" (UID: \"07a5c838-f04b-486a-a120-dece44ac206e\") " pod="openshift-marketplace/redhat-marketplace-qm545" Nov 25 09:39:21 crc kubenswrapper[4687]: I1125 09:39:21.054770 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07a5c838-f04b-486a-a120-dece44ac206e-catalog-content\") pod \"redhat-marketplace-qm545\" (UID: \"07a5c838-f04b-486a-a120-dece44ac206e\") " pod="openshift-marketplace/redhat-marketplace-qm545" Nov 25 09:39:21 crc kubenswrapper[4687]: I1125 09:39:21.054872 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfwvq\" (UniqueName: \"kubernetes.io/projected/07a5c838-f04b-486a-a120-dece44ac206e-kube-api-access-vfwvq\") pod \"redhat-marketplace-qm545\" (UID: \"07a5c838-f04b-486a-a120-dece44ac206e\") " pod="openshift-marketplace/redhat-marketplace-qm545" Nov 25 09:39:21 crc kubenswrapper[4687]: I1125 09:39:21.055169 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07a5c838-f04b-486a-a120-dece44ac206e-utilities\") pod \"redhat-marketplace-qm545\" (UID: \"07a5c838-f04b-486a-a120-dece44ac206e\") " pod="openshift-marketplace/redhat-marketplace-qm545" Nov 25 09:39:21 crc kubenswrapper[4687]: I1125 09:39:21.055449 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07a5c838-f04b-486a-a120-dece44ac206e-catalog-content\") pod \"redhat-marketplace-qm545\" (UID: \"07a5c838-f04b-486a-a120-dece44ac206e\") " pod="openshift-marketplace/redhat-marketplace-qm545" Nov 25 09:39:21 crc kubenswrapper[4687]: I1125 09:39:21.082857 4687 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-vfwvq\" (UniqueName: \"kubernetes.io/projected/07a5c838-f04b-486a-a120-dece44ac206e-kube-api-access-vfwvq\") pod \"redhat-marketplace-qm545\" (UID: \"07a5c838-f04b-486a-a120-dece44ac206e\") " pod="openshift-marketplace/redhat-marketplace-qm545" Nov 25 09:39:21 crc kubenswrapper[4687]: I1125 09:39:21.254857 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qm545" Nov 25 09:39:21 crc kubenswrapper[4687]: I1125 09:39:21.698704 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qm545"] Nov 25 09:39:21 crc kubenswrapper[4687]: W1125 09:39:21.699804 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod07a5c838_f04b_486a_a120_dece44ac206e.slice/crio-cd106959c8cdce4e201ef35c327df42a66edc2cbaaf5bb573ac6b48bb44294e8 WatchSource:0}: Error finding container cd106959c8cdce4e201ef35c327df42a66edc2cbaaf5bb573ac6b48bb44294e8: Status 404 returned error can't find the container with id cd106959c8cdce4e201ef35c327df42a66edc2cbaaf5bb573ac6b48bb44294e8 Nov 25 09:39:22 crc kubenswrapper[4687]: I1125 09:39:22.662122 4687 generic.go:334] "Generic (PLEG): container finished" podID="07a5c838-f04b-486a-a120-dece44ac206e" containerID="3e28c2b270b7b38d82d6aa4b090d12996afe567daabb777b3311bee5a7e69455" exitCode=0 Nov 25 09:39:22 crc kubenswrapper[4687]: I1125 09:39:22.662169 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qm545" event={"ID":"07a5c838-f04b-486a-a120-dece44ac206e","Type":"ContainerDied","Data":"3e28c2b270b7b38d82d6aa4b090d12996afe567daabb777b3311bee5a7e69455"} Nov 25 09:39:22 crc kubenswrapper[4687]: I1125 09:39:22.662413 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qm545" event={"ID":"07a5c838-f04b-486a-a120-dece44ac206e","Type":"ContainerStarted","Data":"cd106959c8cdce4e201ef35c327df42a66edc2cbaaf5bb573ac6b48bb44294e8"} Nov 25 09:39:23 crc kubenswrapper[4687]: I1125 09:39:23.671712 4687 generic.go:334] "Generic (PLEG): container finished" podID="07a5c838-f04b-486a-a120-dece44ac206e" containerID="9e3fa11c7faf0d23019d8230ebaa58ca861dfd1f60890b5146a74f723dbcc20e" exitCode=0 Nov 25 09:39:23 crc kubenswrapper[4687]: I1125 09:39:23.671773 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qm545" event={"ID":"07a5c838-f04b-486a-a120-dece44ac206e","Type":"ContainerDied","Data":"9e3fa11c7faf0d23019d8230ebaa58ca861dfd1f60890b5146a74f723dbcc20e"} Nov 25 09:39:23 crc kubenswrapper[4687]: I1125 09:39:23.845010 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:39:23 crc kubenswrapper[4687]: I1125 09:39:23.845448 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:39:24 crc kubenswrapper[4687]: I1125 09:39:24.683017 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-qm545" event={"ID":"07a5c838-f04b-486a-a120-dece44ac206e","Type":"ContainerStarted","Data":"b640a33a08be4be1109769e9b65482d321e00b3b72cb80e2173d210371a1029d"} Nov 25 09:39:24 crc kubenswrapper[4687]: I1125 09:39:24.707045 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qm545" podStartSLOduration=3.036622971 podStartE2EDuration="4.707023079s" podCreationTimestamp="2025-11-25 09:39:20 +0000 UTC" firstStartedPulling="2025-11-25 09:39:22.663948942 +0000 UTC m=+2157.717588660" lastFinishedPulling="2025-11-25 09:39:24.33434905 +0000 UTC m=+2159.387988768" observedRunningTime="2025-11-25 09:39:24.700939893 +0000 UTC m=+2159.754579621" watchObservedRunningTime="2025-11-25 09:39:24.707023079 +0000 UTC m=+2159.760662797" Nov 25 09:39:31 crc kubenswrapper[4687]: I1125 09:39:31.255680 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qm545" Nov 25 09:39:31 crc kubenswrapper[4687]: I1125 09:39:31.256251 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qm545" Nov 25 09:39:31 crc kubenswrapper[4687]: I1125 09:39:31.302073 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qm545" Nov 25 09:39:31 crc kubenswrapper[4687]: I1125 09:39:31.792685 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qm545" Nov 25 09:39:31 crc kubenswrapper[4687]: I1125 09:39:31.838308 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qm545"] Nov 25 09:39:33 crc kubenswrapper[4687]: I1125 09:39:33.764935 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qm545" podUID="07a5c838-f04b-486a-a120-dece44ac206e" containerName="registry-server" containerID="cri-o://b640a33a08be4be1109769e9b65482d321e00b3b72cb80e2173d210371a1029d" gracePeriod=2 Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.587852 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qm545" Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.712372 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vfwvq\" (UniqueName: \"kubernetes.io/projected/07a5c838-f04b-486a-a120-dece44ac206e-kube-api-access-vfwvq\") pod \"07a5c838-f04b-486a-a120-dece44ac206e\" (UID: \"07a5c838-f04b-486a-a120-dece44ac206e\") " Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.712831 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07a5c838-f04b-486a-a120-dece44ac206e-catalog-content\") pod \"07a5c838-f04b-486a-a120-dece44ac206e\" (UID: \"07a5c838-f04b-486a-a120-dece44ac206e\") " Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.713069 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07a5c838-f04b-486a-a120-dece44ac206e-utilities\") pod \"07a5c838-f04b-486a-a120-dece44ac206e\" (UID: \"07a5c838-f04b-486a-a120-dece44ac206e\") " Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.713824 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07a5c838-f04b-486a-a120-dece44ac206e-utilities" (OuterVolumeSpecName: "utilities") pod "07a5c838-f04b-486a-a120-dece44ac206e" (UID: "07a5c838-f04b-486a-a120-dece44ac206e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.731756 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07a5c838-f04b-486a-a120-dece44ac206e-kube-api-access-vfwvq" (OuterVolumeSpecName: "kube-api-access-vfwvq") pod "07a5c838-f04b-486a-a120-dece44ac206e" (UID: "07a5c838-f04b-486a-a120-dece44ac206e"). InnerVolumeSpecName "kube-api-access-vfwvq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.737384 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07a5c838-f04b-486a-a120-dece44ac206e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "07a5c838-f04b-486a-a120-dece44ac206e" (UID: "07a5c838-f04b-486a-a120-dece44ac206e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.775919 4687 generic.go:334] "Generic (PLEG): container finished" podID="07a5c838-f04b-486a-a120-dece44ac206e" containerID="b640a33a08be4be1109769e9b65482d321e00b3b72cb80e2173d210371a1029d" exitCode=0 Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.775963 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qm545" event={"ID":"07a5c838-f04b-486a-a120-dece44ac206e","Type":"ContainerDied","Data":"b640a33a08be4be1109769e9b65482d321e00b3b72cb80e2173d210371a1029d"} Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.775989 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qm545" event={"ID":"07a5c838-f04b-486a-a120-dece44ac206e","Type":"ContainerDied","Data":"cd106959c8cdce4e201ef35c327df42a66edc2cbaaf5bb573ac6b48bb44294e8"} Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.776007 4687 scope.go:117] "RemoveContainer" containerID="b640a33a08be4be1109769e9b65482d321e00b3b72cb80e2173d210371a1029d" Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.776006 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qm545" Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.800585 4687 scope.go:117] "RemoveContainer" containerID="9e3fa11c7faf0d23019d8230ebaa58ca861dfd1f60890b5146a74f723dbcc20e" Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.828988 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07a5c838-f04b-486a-a120-dece44ac206e-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.829018 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vfwvq\" (UniqueName: \"kubernetes.io/projected/07a5c838-f04b-486a-a120-dece44ac206e-kube-api-access-vfwvq\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.829031 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07a5c838-f04b-486a-a120-dece44ac206e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.833175 4687 scope.go:117] "RemoveContainer" containerID="3e28c2b270b7b38d82d6aa4b090d12996afe567daabb777b3311bee5a7e69455" Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.846912 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qm545"] Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.856208 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qm545"] Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.884530 4687 scope.go:117] "RemoveContainer" containerID="b640a33a08be4be1109769e9b65482d321e00b3b72cb80e2173d210371a1029d" Nov 25 09:39:34 crc kubenswrapper[4687]: E1125 09:39:34.884972 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b640a33a08be4be1109769e9b65482d321e00b3b72cb80e2173d210371a1029d\": container with ID starting with b640a33a08be4be1109769e9b65482d321e00b3b72cb80e2173d210371a1029d not found: ID does not exist" containerID="b640a33a08be4be1109769e9b65482d321e00b3b72cb80e2173d210371a1029d" Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.885004 4687 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b640a33a08be4be1109769e9b65482d321e00b3b72cb80e2173d210371a1029d"} err="failed to get container status \"b640a33a08be4be1109769e9b65482d321e00b3b72cb80e2173d210371a1029d\": rpc error: code = NotFound desc = could not find container \"b640a33a08be4be1109769e9b65482d321e00b3b72cb80e2173d210371a1029d\": container with ID starting with b640a33a08be4be1109769e9b65482d321e00b3b72cb80e2173d210371a1029d not found: ID does not exist" Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.885025 4687 scope.go:117] "RemoveContainer" containerID="9e3fa11c7faf0d23019d8230ebaa58ca861dfd1f60890b5146a74f723dbcc20e" Nov 25 09:39:34 crc kubenswrapper[4687]: E1125 09:39:34.885531 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e3fa11c7faf0d23019d8230ebaa58ca861dfd1f60890b5146a74f723dbcc20e\": container with ID starting with 9e3fa11c7faf0d23019d8230ebaa58ca861dfd1f60890b5146a74f723dbcc20e not found: ID does not exist" containerID="9e3fa11c7faf0d23019d8230ebaa58ca861dfd1f60890b5146a74f723dbcc20e" Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.885594 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e3fa11c7faf0d23019d8230ebaa58ca861dfd1f60890b5146a74f723dbcc20e"} err="failed to get container status \"9e3fa11c7faf0d23019d8230ebaa58ca861dfd1f60890b5146a74f723dbcc20e\": rpc error: code = NotFound desc = could not find container \"9e3fa11c7faf0d23019d8230ebaa58ca861dfd1f60890b5146a74f723dbcc20e\": container with ID starting with 9e3fa11c7faf0d23019d8230ebaa58ca861dfd1f60890b5146a74f723dbcc20e not found: ID does not exist" Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.885630 4687 scope.go:117] "RemoveContainer" containerID="3e28c2b270b7b38d82d6aa4b090d12996afe567daabb777b3311bee5a7e69455" Nov 25 09:39:34 crc kubenswrapper[4687]: E1125 09:39:34.886068 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e28c2b270b7b38d82d6aa4b090d12996afe567daabb777b3311bee5a7e69455\": container with ID starting with 3e28c2b270b7b38d82d6aa4b090d12996afe567daabb777b3311bee5a7e69455 not found: ID does not exist" containerID="3e28c2b270b7b38d82d6aa4b090d12996afe567daabb777b3311bee5a7e69455" Nov 25 09:39:34 crc kubenswrapper[4687]: I1125 09:39:34.886103 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e28c2b270b7b38d82d6aa4b090d12996afe567daabb777b3311bee5a7e69455"} err="failed to get container status \"3e28c2b270b7b38d82d6aa4b090d12996afe567daabb777b3311bee5a7e69455\": rpc error: code = NotFound desc = could not find container \"3e28c2b270b7b38d82d6aa4b090d12996afe567daabb777b3311bee5a7e69455\": container with ID starting with 3e28c2b270b7b38d82d6aa4b090d12996afe567daabb777b3311bee5a7e69455 not found: ID does not exist" Nov 25 09:39:35 crc kubenswrapper[4687]: I1125 09:39:35.747633 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07a5c838-f04b-486a-a120-dece44ac206e" path="/var/lib/kubelet/pods/07a5c838-f04b-486a-a120-dece44ac206e/volumes" Nov 25 09:39:53 crc kubenswrapper[4687]: I1125 09:39:53.845195 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:39:53 crc kubenswrapper[4687]: I1125 09:39:53.845820 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:39:53 crc kubenswrapper[4687]: I1125 09:39:53.845864 4687 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:39:53 crc kubenswrapper[4687]: I1125 09:39:53.846577 4687 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f232f6663409036e1916f49b44a9d348d4367ab37376d99fe2b76c8655d5ccae"} pod="openshift-machine-config-operator/machine-config-daemon-vcqct" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:39:53 crc kubenswrapper[4687]: I1125 09:39:53.846622 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" containerID="cri-o://f232f6663409036e1916f49b44a9d348d4367ab37376d99fe2b76c8655d5ccae" gracePeriod=600 Nov 25 09:39:54 crc kubenswrapper[4687]: I1125 09:39:54.954269 4687 generic.go:334] "Generic (PLEG): container finished" podID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerID="f232f6663409036e1916f49b44a9d348d4367ab37376d99fe2b76c8655d5ccae" exitCode=0 Nov 25 09:39:54 crc kubenswrapper[4687]: I1125 09:39:54.954372 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerDied","Data":"f232f6663409036e1916f49b44a9d348d4367ab37376d99fe2b76c8655d5ccae"} Nov 25 09:39:54 crc kubenswrapper[4687]: I1125 09:39:54.955001 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerStarted","Data":"40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e"} Nov 25 09:39:54 crc kubenswrapper[4687]: I1125 09:39:54.955030 4687 scope.go:117] "RemoveContainer" containerID="8c013d1dd11258b43b2288cd44bb523e690ed89eb33a1328c68283c864575895" Nov 25 09:40:06 crc kubenswrapper[4687]: I1125 09:40:06.065806 4687 generic.go:334] "Generic (PLEG): container finished" podID="db45ca10-ced6-46c4-84e8-ac525cd596b4" containerID="4afa53688d2ba82be225e61667598bcc439fc85492420940e4c89e1b00ca648b" exitCode=0 Nov 25 09:40:06 crc kubenswrapper[4687]: I1125 09:40:06.065950 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" event={"ID":"db45ca10-ced6-46c4-84e8-ac525cd596b4","Type":"ContainerDied","Data":"4afa53688d2ba82be225e61667598bcc439fc85492420940e4c89e1b00ca648b"} Nov 25 09:40:07 crc kubenswrapper[4687]: I1125 09:40:07.503062 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" Nov 25 09:40:07 crc kubenswrapper[4687]: I1125 09:40:07.584123 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m98xb\" (UniqueName: \"kubernetes.io/projected/db45ca10-ced6-46c4-84e8-ac525cd596b4-kube-api-access-m98xb\") pod \"db45ca10-ced6-46c4-84e8-ac525cd596b4\" (UID: \"db45ca10-ced6-46c4-84e8-ac525cd596b4\") " Nov 25 09:40:07 crc kubenswrapper[4687]: I1125 09:40:07.584205 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/db45ca10-ced6-46c4-84e8-ac525cd596b4-inventory\") pod \"db45ca10-ced6-46c4-84e8-ac525cd596b4\" (UID: \"db45ca10-ced6-46c4-84e8-ac525cd596b4\") " Nov 25 09:40:07 crc kubenswrapper[4687]: I1125 09:40:07.584368 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/db45ca10-ced6-46c4-84e8-ac525cd596b4-ssh-key\") pod \"db45ca10-ced6-46c4-84e8-ac525cd596b4\" (UID: \"db45ca10-ced6-46c4-84e8-ac525cd596b4\") " Nov 25 09:40:07 crc kubenswrapper[4687]: I1125 09:40:07.584390 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db45ca10-ced6-46c4-84e8-ac525cd596b4-ovn-combined-ca-bundle\") pod \"db45ca10-ced6-46c4-84e8-ac525cd596b4\" (UID: \"db45ca10-ced6-46c4-84e8-ac525cd596b4\") " Nov 25 09:40:07 crc kubenswrapper[4687]: I1125 09:40:07.584453 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/db45ca10-ced6-46c4-84e8-ac525cd596b4-ovncontroller-config-0\") pod \"db45ca10-ced6-46c4-84e8-ac525cd596b4\" (UID: \"db45ca10-ced6-46c4-84e8-ac525cd596b4\") " Nov 25 09:40:07 crc kubenswrapper[4687]: I1125 09:40:07.593538 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db45ca10-ced6-46c4-84e8-ac525cd596b4-kube-api-access-m98xb" (OuterVolumeSpecName: "kube-api-access-m98xb") pod "db45ca10-ced6-46c4-84e8-ac525cd596b4" (UID: "db45ca10-ced6-46c4-84e8-ac525cd596b4"). InnerVolumeSpecName "kube-api-access-m98xb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:40:07 crc kubenswrapper[4687]: I1125 09:40:07.593405 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db45ca10-ced6-46c4-84e8-ac525cd596b4-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "db45ca10-ced6-46c4-84e8-ac525cd596b4" (UID: "db45ca10-ced6-46c4-84e8-ac525cd596b4"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:40:07 crc kubenswrapper[4687]: I1125 09:40:07.620934 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db45ca10-ced6-46c4-84e8-ac525cd596b4-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "db45ca10-ced6-46c4-84e8-ac525cd596b4" (UID: "db45ca10-ced6-46c4-84e8-ac525cd596b4"). InnerVolumeSpecName "ovncontroller-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:40:07 crc kubenswrapper[4687]: I1125 09:40:07.622309 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db45ca10-ced6-46c4-84e8-ac525cd596b4-inventory" (OuterVolumeSpecName: "inventory") pod "db45ca10-ced6-46c4-84e8-ac525cd596b4" (UID: "db45ca10-ced6-46c4-84e8-ac525cd596b4"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:40:07 crc kubenswrapper[4687]: I1125 09:40:07.622923 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db45ca10-ced6-46c4-84e8-ac525cd596b4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "db45ca10-ced6-46c4-84e8-ac525cd596b4" (UID: "db45ca10-ced6-46c4-84e8-ac525cd596b4"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:40:07 crc kubenswrapper[4687]: I1125 09:40:07.687025 4687 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/db45ca10-ced6-46c4-84e8-ac525cd596b4-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:07 crc kubenswrapper[4687]: I1125 09:40:07.687059 4687 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db45ca10-ced6-46c4-84e8-ac525cd596b4-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:07 crc kubenswrapper[4687]: I1125 09:40:07.687073 4687 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/db45ca10-ced6-46c4-84e8-ac525cd596b4-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:07 crc kubenswrapper[4687]: I1125 09:40:07.687083 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m98xb\" (UniqueName: \"kubernetes.io/projected/db45ca10-ced6-46c4-84e8-ac525cd596b4-kube-api-access-m98xb\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:07 crc kubenswrapper[4687]: I1125 09:40:07.687131 4687 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/db45ca10-ced6-46c4-84e8-ac525cd596b4-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.086896 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" event={"ID":"db45ca10-ced6-46c4-84e8-ac525cd596b4","Type":"ContainerDied","Data":"1bedca4c63665443721b8085d02fa8e021d85a8ac80b476e70ed714b0099dd77"} Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.087323 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1bedca4c63665443721b8085d02fa8e021d85a8ac80b476e70ed714b0099dd77" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.086980 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wwsqh" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.177565 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56"] Nov 25 09:40:08 crc kubenswrapper[4687]: E1125 09:40:08.178037 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07a5c838-f04b-486a-a120-dece44ac206e" containerName="registry-server" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.178058 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="07a5c838-f04b-486a-a120-dece44ac206e" containerName="registry-server" Nov 25 09:40:08 crc kubenswrapper[4687]: E1125 09:40:08.178080 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07a5c838-f04b-486a-a120-dece44ac206e" containerName="extract-content" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.178089 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="07a5c838-f04b-486a-a120-dece44ac206e" containerName="extract-content" Nov 25 09:40:08 crc kubenswrapper[4687]: E1125 09:40:08.178104 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07a5c838-f04b-486a-a120-dece44ac206e" containerName="extract-utilities" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.178111 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="07a5c838-f04b-486a-a120-dece44ac206e" containerName="extract-utilities" Nov 25 09:40:08 crc kubenswrapper[4687]: E1125 09:40:08.178143 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db45ca10-ced6-46c4-84e8-ac525cd596b4" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.178151 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="db45ca10-ced6-46c4-84e8-ac525cd596b4" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.178381 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="db45ca10-ced6-46c4-84e8-ac525cd596b4" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.178400 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="07a5c838-f04b-486a-a120-dece44ac206e" containerName="registry-server" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.179151 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.181969 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.182168 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.182428 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.182751 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.182935 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-44nct" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.183093 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.191400 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56"] Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.296859 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4pds\" (UniqueName: \"kubernetes.io/projected/ec0b61ae-ccac-473b-ab43-e21daf1c348e-kube-api-access-l4pds\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.296916 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.296940 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.297134 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.297381 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: 
\"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.297418 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.399007 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.399072 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.399118 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4pds\" (UniqueName: \"kubernetes.io/projected/ec0b61ae-ccac-473b-ab43-e21daf1c348e-kube-api-access-l4pds\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.399147 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.399167 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.399196 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 
09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.403940 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.404602 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.404801 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.404899 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.405157 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.417156 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4pds\" (UniqueName: \"kubernetes.io/projected/ec0b61ae-ccac-473b-ab43-e21daf1c348e-kube-api-access-l4pds\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 09:40:08 crc kubenswrapper[4687]: I1125 09:40:08.503823 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 09:40:09 crc kubenswrapper[4687]: I1125 09:40:09.033060 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56"] Nov 25 09:40:09 crc kubenswrapper[4687]: I1125 09:40:09.099813 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" event={"ID":"ec0b61ae-ccac-473b-ab43-e21daf1c348e","Type":"ContainerStarted","Data":"43d4ceb59646141456d59011861afe7dae73efb248bec0a45cf47565c3a496d9"} Nov 25 09:40:10 crc kubenswrapper[4687]: I1125 09:40:10.113935 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" event={"ID":"ec0b61ae-ccac-473b-ab43-e21daf1c348e","Type":"ContainerStarted","Data":"a4e2939cbf37f847844ba51556f555efacf13e3547087146073a65c5f18f8ffe"} Nov 25 09:40:10 crc kubenswrapper[4687]: I1125 09:40:10.147712 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" podStartSLOduration=1.511079289 podStartE2EDuration="2.147674589s" podCreationTimestamp="2025-11-25 09:40:08 +0000 UTC" firstStartedPulling="2025-11-25 09:40:09.042863943 +0000 UTC m=+2204.096503661" lastFinishedPulling="2025-11-25 09:40:09.679459243 +0000 UTC m=+2204.733098961" observedRunningTime="2025-11-25 09:40:10.138025717 +0000 UTC m=+2205.191665445" watchObservedRunningTime="2025-11-25 09:40:10.147674589 +0000 UTC m=+2205.201314307" Nov 25 09:40:55 crc kubenswrapper[4687]: I1125 09:40:55.520665 4687 generic.go:334] "Generic (PLEG): container finished" podID="ec0b61ae-ccac-473b-ab43-e21daf1c348e" containerID="a4e2939cbf37f847844ba51556f555efacf13e3547087146073a65c5f18f8ffe" exitCode=0 Nov 25 09:40:55 crc kubenswrapper[4687]: I1125 09:40:55.520771 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" event={"ID":"ec0b61ae-ccac-473b-ab43-e21daf1c348e","Type":"ContainerDied","Data":"a4e2939cbf37f847844ba51556f555efacf13e3547087146073a65c5f18f8ffe"} Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.032106 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.232883 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4pds\" (UniqueName: \"kubernetes.io/projected/ec0b61ae-ccac-473b-ab43-e21daf1c348e-kube-api-access-l4pds\") pod \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.232972 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-inventory\") pod \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.233787 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-neutron-ovn-metadata-agent-neutron-config-0\") pod \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.234049 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-nova-metadata-neutron-config-0\") pod \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.234254 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-ssh-key\") pod \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.234405 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-neutron-metadata-combined-ca-bundle\") pod \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\" (UID: \"ec0b61ae-ccac-473b-ab43-e21daf1c348e\") " Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.240301 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "ec0b61ae-ccac-473b-ab43-e21daf1c348e" (UID: "ec0b61ae-ccac-473b-ab43-e21daf1c348e"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.240829 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec0b61ae-ccac-473b-ab43-e21daf1c348e-kube-api-access-l4pds" (OuterVolumeSpecName: "kube-api-access-l4pds") pod "ec0b61ae-ccac-473b-ab43-e21daf1c348e" (UID: "ec0b61ae-ccac-473b-ab43-e21daf1c348e"). InnerVolumeSpecName "kube-api-access-l4pds". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.260181 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "ec0b61ae-ccac-473b-ab43-e21daf1c348e" (UID: "ec0b61ae-ccac-473b-ab43-e21daf1c348e"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.266623 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ec0b61ae-ccac-473b-ab43-e21daf1c348e" (UID: "ec0b61ae-ccac-473b-ab43-e21daf1c348e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.267050 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "ec0b61ae-ccac-473b-ab43-e21daf1c348e" (UID: "ec0b61ae-ccac-473b-ab43-e21daf1c348e"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.271464 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-inventory" (OuterVolumeSpecName: "inventory") pod "ec0b61ae-ccac-473b-ab43-e21daf1c348e" (UID: "ec0b61ae-ccac-473b-ab43-e21daf1c348e"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.337644 4687 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.337691 4687 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.337714 4687 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.337730 4687 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.337747 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4pds\" (UniqueName: \"kubernetes.io/projected/ec0b61ae-ccac-473b-ab43-e21daf1c348e-kube-api-access-l4pds\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.337764 4687 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ec0b61ae-ccac-473b-ab43-e21daf1c348e-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.537133 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" event={"ID":"ec0b61ae-ccac-473b-ab43-e21daf1c348e","Type":"ContainerDied","Data":"43d4ceb59646141456d59011861afe7dae73efb248bec0a45cf47565c3a496d9"} Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.537171 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="43d4ceb59646141456d59011861afe7dae73efb248bec0a45cf47565c3a496d9" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.537194 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.645789 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn"] Nov 25 09:40:57 crc kubenswrapper[4687]: E1125 09:40:57.646479 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec0b61ae-ccac-473b-ab43-e21daf1c348e" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.646622 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec0b61ae-ccac-473b-ab43-e21daf1c348e" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.646890 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec0b61ae-ccac-473b-ab43-e21daf1c348e" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.647687 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.649824 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.649950 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.650684 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-44nct" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.650833 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.655363 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.657235 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn"] Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.743292 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn\" (UID: \"7cf72d64-3a5f-42c4-a290-2244169a8a60\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.743342 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn\" (UID: \"7cf72d64-3a5f-42c4-a290-2244169a8a60\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.743421 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cm74q\" (UniqueName: \"kubernetes.io/projected/7cf72d64-3a5f-42c4-a290-2244169a8a60-kube-api-access-cm74q\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn\" (UID: \"7cf72d64-3a5f-42c4-a290-2244169a8a60\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.743460 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn\" (UID: \"7cf72d64-3a5f-42c4-a290-2244169a8a60\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.743518 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn\" (UID: \"7cf72d64-3a5f-42c4-a290-2244169a8a60\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.845167 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ssh-key\" (UniqueName: \"kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn\" (UID: \"7cf72d64-3a5f-42c4-a290-2244169a8a60\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.845295 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn\" (UID: \"7cf72d64-3a5f-42c4-a290-2244169a8a60\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.846222 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cm74q\" (UniqueName: \"kubernetes.io/projected/7cf72d64-3a5f-42c4-a290-2244169a8a60-kube-api-access-cm74q\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn\" (UID: \"7cf72d64-3a5f-42c4-a290-2244169a8a60\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.846267 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn\" (UID: \"7cf72d64-3a5f-42c4-a290-2244169a8a60\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.846317 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn\" (UID: \"7cf72d64-3a5f-42c4-a290-2244169a8a60\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.850652 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn\" (UID: \"7cf72d64-3a5f-42c4-a290-2244169a8a60\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.850682 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn\" (UID: \"7cf72d64-3a5f-42c4-a290-2244169a8a60\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.851436 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn\" (UID: \"7cf72d64-3a5f-42c4-a290-2244169a8a60\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.851653 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-libvirt-secret-0\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn\" (UID: \"7cf72d64-3a5f-42c4-a290-2244169a8a60\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.863684 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cm74q\" (UniqueName: \"kubernetes.io/projected/7cf72d64-3a5f-42c4-a290-2244169a8a60-kube-api-access-cm74q\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn\" (UID: \"7cf72d64-3a5f-42c4-a290-2244169a8a60\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" Nov 25 09:40:57 crc kubenswrapper[4687]: I1125 09:40:57.970954 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" Nov 25 09:40:58 crc kubenswrapper[4687]: I1125 09:40:58.503801 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn"] Nov 25 09:40:58 crc kubenswrapper[4687]: I1125 09:40:58.548883 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" event={"ID":"7cf72d64-3a5f-42c4-a290-2244169a8a60","Type":"ContainerStarted","Data":"e76b90cd6798e3117c1b775622cb37a7f145453d907064ba6e4700582f1e3cb8"} Nov 25 09:41:01 crc kubenswrapper[4687]: I1125 09:41:01.572694 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" event={"ID":"7cf72d64-3a5f-42c4-a290-2244169a8a60","Type":"ContainerStarted","Data":"3dc7c34e93921c902a6262c55e25be0fa496aa86af0346693b07ab3b54cac08f"} Nov 25 09:41:01 crc kubenswrapper[4687]: I1125 09:41:01.596141 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" podStartSLOduration=2.984246081 podStartE2EDuration="4.596120078s" podCreationTimestamp="2025-11-25 09:40:57 +0000 UTC" firstStartedPulling="2025-11-25 09:40:58.504273028 +0000 UTC m=+2253.557912746" lastFinishedPulling="2025-11-25 09:41:00.116147025 +0000 UTC m=+2255.169786743" observedRunningTime="2025-11-25 09:41:01.587960686 +0000 UTC m=+2256.641600414" watchObservedRunningTime="2025-11-25 09:41:01.596120078 +0000 UTC m=+2256.649759796" Nov 25 09:42:23 crc kubenswrapper[4687]: I1125 09:42:23.844456 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:42:23 crc kubenswrapper[4687]: I1125 09:42:23.845273 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:42:53 crc kubenswrapper[4687]: I1125 09:42:53.844186 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:42:53 crc kubenswrapper[4687]: I1125 09:42:53.844834 4687 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:43:23 crc kubenswrapper[4687]: I1125 09:43:23.844694 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:43:23 crc kubenswrapper[4687]: I1125 09:43:23.845457 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:43:23 crc kubenswrapper[4687]: I1125 09:43:23.845515 4687 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:43:23 crc kubenswrapper[4687]: I1125 09:43:23.846200 4687 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e"} pod="openshift-machine-config-operator/machine-config-daemon-vcqct" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:43:23 crc kubenswrapper[4687]: I1125 09:43:23.846254 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" containerID="cri-o://40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" gracePeriod=600 Nov 25 09:43:24 crc kubenswrapper[4687]: E1125 09:43:24.092072 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:43:24 crc kubenswrapper[4687]: I1125 09:43:24.826830 4687 generic.go:334] "Generic (PLEG): container finished" podID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" exitCode=0 Nov 25 09:43:24 crc kubenswrapper[4687]: I1125 09:43:24.826875 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerDied","Data":"40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e"} Nov 25 09:43:24 crc kubenswrapper[4687]: I1125 09:43:24.826925 4687 scope.go:117] "RemoveContainer" containerID="f232f6663409036e1916f49b44a9d348d4367ab37376d99fe2b76c8655d5ccae" Nov 25 09:43:24 crc kubenswrapper[4687]: I1125 09:43:24.828093 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:43:24 crc kubenswrapper[4687]: E1125 
09:43:24.828528 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:43:38 crc kubenswrapper[4687]: I1125 09:43:38.216654 4687 scope.go:117] "RemoveContainer" containerID="c3aa35027166df439bf3057cff3bf25cf69b2bae36e4532674c015b23d234805" Nov 25 09:43:38 crc kubenswrapper[4687]: I1125 09:43:38.241488 4687 scope.go:117] "RemoveContainer" containerID="211b0ae84f399f28b167bd07b06471dc138d2a87ab00a626fe43a046a1fd4131" Nov 25 09:43:38 crc kubenswrapper[4687]: I1125 09:43:38.264766 4687 scope.go:117] "RemoveContainer" containerID="7fa026561c2336f47884d1f865595f6bd3d7e55ad82be2dc76748ba87ab32526" Nov 25 09:43:38 crc kubenswrapper[4687]: I1125 09:43:38.734938 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:43:38 crc kubenswrapper[4687]: E1125 09:43:38.735208 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:43:51 crc kubenswrapper[4687]: I1125 09:43:51.734858 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:43:51 crc kubenswrapper[4687]: E1125 09:43:51.735695 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:44:03 crc kubenswrapper[4687]: I1125 09:44:03.734940 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:44:03 crc kubenswrapper[4687]: E1125 09:44:03.735797 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:44:15 crc kubenswrapper[4687]: I1125 09:44:15.742407 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:44:15 crc kubenswrapper[4687]: E1125 09:44:15.743484 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:44:29 crc kubenswrapper[4687]: I1125 09:44:29.734457 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:44:29 crc kubenswrapper[4687]: E1125 09:44:29.735564 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:44:41 crc kubenswrapper[4687]: I1125 09:44:41.734926 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:44:41 crc kubenswrapper[4687]: E1125 09:44:41.735668 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:44:55 crc kubenswrapper[4687]: I1125 09:44:55.764027 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:44:55 crc kubenswrapper[4687]: E1125 09:44:55.765303 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:45:00 crc kubenswrapper[4687]: I1125 09:45:00.145382 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401065-96q8m"] Nov 25 09:45:00 crc kubenswrapper[4687]: I1125 09:45:00.147361 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-96q8m" Nov 25 09:45:00 crc kubenswrapper[4687]: I1125 09:45:00.150360 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 09:45:00 crc kubenswrapper[4687]: I1125 09:45:00.152242 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 09:45:00 crc kubenswrapper[4687]: I1125 09:45:00.175977 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401065-96q8m"] Nov 25 09:45:00 crc kubenswrapper[4687]: I1125 09:45:00.312082 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j88xx\" (UniqueName: \"kubernetes.io/projected/7b90b5d8-a050-44f2-bb81-d17468571fcc-kube-api-access-j88xx\") pod \"collect-profiles-29401065-96q8m\" (UID: \"7b90b5d8-a050-44f2-bb81-d17468571fcc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-96q8m" Nov 25 09:45:00 crc kubenswrapper[4687]: I1125 09:45:00.312122 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7b90b5d8-a050-44f2-bb81-d17468571fcc-secret-volume\") pod \"collect-profiles-29401065-96q8m\" (UID: \"7b90b5d8-a050-44f2-bb81-d17468571fcc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-96q8m" Nov 25 09:45:00 crc kubenswrapper[4687]: I1125 09:45:00.312273 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7b90b5d8-a050-44f2-bb81-d17468571fcc-config-volume\") pod \"collect-profiles-29401065-96q8m\" (UID: \"7b90b5d8-a050-44f2-bb81-d17468571fcc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-96q8m" Nov 25 09:45:00 crc kubenswrapper[4687]: I1125 09:45:00.413812 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7b90b5d8-a050-44f2-bb81-d17468571fcc-config-volume\") pod \"collect-profiles-29401065-96q8m\" (UID: \"7b90b5d8-a050-44f2-bb81-d17468571fcc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-96q8m" Nov 25 09:45:00 crc kubenswrapper[4687]: I1125 09:45:00.413870 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j88xx\" (UniqueName: \"kubernetes.io/projected/7b90b5d8-a050-44f2-bb81-d17468571fcc-kube-api-access-j88xx\") pod \"collect-profiles-29401065-96q8m\" (UID: \"7b90b5d8-a050-44f2-bb81-d17468571fcc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-96q8m" Nov 25 09:45:00 crc kubenswrapper[4687]: I1125 09:45:00.413902 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7b90b5d8-a050-44f2-bb81-d17468571fcc-secret-volume\") pod \"collect-profiles-29401065-96q8m\" (UID: \"7b90b5d8-a050-44f2-bb81-d17468571fcc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-96q8m" Nov 25 09:45:00 crc kubenswrapper[4687]: I1125 09:45:00.415644 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7b90b5d8-a050-44f2-bb81-d17468571fcc-config-volume\") pod 
\"collect-profiles-29401065-96q8m\" (UID: \"7b90b5d8-a050-44f2-bb81-d17468571fcc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-96q8m" Nov 25 09:45:00 crc kubenswrapper[4687]: I1125 09:45:00.422280 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7b90b5d8-a050-44f2-bb81-d17468571fcc-secret-volume\") pod \"collect-profiles-29401065-96q8m\" (UID: \"7b90b5d8-a050-44f2-bb81-d17468571fcc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-96q8m" Nov 25 09:45:00 crc kubenswrapper[4687]: I1125 09:45:00.443471 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j88xx\" (UniqueName: \"kubernetes.io/projected/7b90b5d8-a050-44f2-bb81-d17468571fcc-kube-api-access-j88xx\") pod \"collect-profiles-29401065-96q8m\" (UID: \"7b90b5d8-a050-44f2-bb81-d17468571fcc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-96q8m" Nov 25 09:45:00 crc kubenswrapper[4687]: I1125 09:45:00.479121 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-96q8m" Nov 25 09:45:00 crc kubenswrapper[4687]: I1125 09:45:00.947093 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401065-96q8m"] Nov 25 09:45:01 crc kubenswrapper[4687]: I1125 09:45:01.695463 4687 generic.go:334] "Generic (PLEG): container finished" podID="7b90b5d8-a050-44f2-bb81-d17468571fcc" containerID="94d5f1191984c2a33e3cb2ea469e1ed0af86fa279099765e4f553b21dc3e23bc" exitCode=0 Nov 25 09:45:01 crc kubenswrapper[4687]: I1125 09:45:01.695542 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-96q8m" event={"ID":"7b90b5d8-a050-44f2-bb81-d17468571fcc","Type":"ContainerDied","Data":"94d5f1191984c2a33e3cb2ea469e1ed0af86fa279099765e4f553b21dc3e23bc"} Nov 25 09:45:01 crc kubenswrapper[4687]: I1125 09:45:01.695817 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-96q8m" event={"ID":"7b90b5d8-a050-44f2-bb81-d17468571fcc","Type":"ContainerStarted","Data":"80b573698352e01f733b695128472febd33851681f931db58361a8bae7489565"} Nov 25 09:45:03 crc kubenswrapper[4687]: I1125 09:45:03.031922 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-96q8m" Nov 25 09:45:03 crc kubenswrapper[4687]: I1125 09:45:03.161974 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7b90b5d8-a050-44f2-bb81-d17468571fcc-secret-volume\") pod \"7b90b5d8-a050-44f2-bb81-d17468571fcc\" (UID: \"7b90b5d8-a050-44f2-bb81-d17468571fcc\") " Nov 25 09:45:03 crc kubenswrapper[4687]: I1125 09:45:03.162398 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j88xx\" (UniqueName: \"kubernetes.io/projected/7b90b5d8-a050-44f2-bb81-d17468571fcc-kube-api-access-j88xx\") pod \"7b90b5d8-a050-44f2-bb81-d17468571fcc\" (UID: \"7b90b5d8-a050-44f2-bb81-d17468571fcc\") " Nov 25 09:45:03 crc kubenswrapper[4687]: I1125 09:45:03.162446 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7b90b5d8-a050-44f2-bb81-d17468571fcc-config-volume\") pod \"7b90b5d8-a050-44f2-bb81-d17468571fcc\" (UID: \"7b90b5d8-a050-44f2-bb81-d17468571fcc\") " Nov 25 09:45:03 crc kubenswrapper[4687]: I1125 09:45:03.163603 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b90b5d8-a050-44f2-bb81-d17468571fcc-config-volume" (OuterVolumeSpecName: "config-volume") pod "7b90b5d8-a050-44f2-bb81-d17468571fcc" (UID: "7b90b5d8-a050-44f2-bb81-d17468571fcc"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:45:03 crc kubenswrapper[4687]: I1125 09:45:03.173807 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b90b5d8-a050-44f2-bb81-d17468571fcc-kube-api-access-j88xx" (OuterVolumeSpecName: "kube-api-access-j88xx") pod "7b90b5d8-a050-44f2-bb81-d17468571fcc" (UID: "7b90b5d8-a050-44f2-bb81-d17468571fcc"). InnerVolumeSpecName "kube-api-access-j88xx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:45:03 crc kubenswrapper[4687]: I1125 09:45:03.183322 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b90b5d8-a050-44f2-bb81-d17468571fcc-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "7b90b5d8-a050-44f2-bb81-d17468571fcc" (UID: "7b90b5d8-a050-44f2-bb81-d17468571fcc"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:45:03 crc kubenswrapper[4687]: I1125 09:45:03.264381 4687 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7b90b5d8-a050-44f2-bb81-d17468571fcc-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:45:03 crc kubenswrapper[4687]: I1125 09:45:03.264425 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j88xx\" (UniqueName: \"kubernetes.io/projected/7b90b5d8-a050-44f2-bb81-d17468571fcc-kube-api-access-j88xx\") on node \"crc\" DevicePath \"\"" Nov 25 09:45:03 crc kubenswrapper[4687]: I1125 09:45:03.264441 4687 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7b90b5d8-a050-44f2-bb81-d17468571fcc-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:45:03 crc kubenswrapper[4687]: I1125 09:45:03.713123 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-96q8m" event={"ID":"7b90b5d8-a050-44f2-bb81-d17468571fcc","Type":"ContainerDied","Data":"80b573698352e01f733b695128472febd33851681f931db58361a8bae7489565"} Nov 25 09:45:03 crc kubenswrapper[4687]: I1125 09:45:03.713160 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="80b573698352e01f733b695128472febd33851681f931db58361a8bae7489565" Nov 25 09:45:03 crc kubenswrapper[4687]: I1125 09:45:03.713201 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-96q8m" Nov 25 09:45:04 crc kubenswrapper[4687]: I1125 09:45:04.127211 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw"] Nov 25 09:45:04 crc kubenswrapper[4687]: I1125 09:45:04.134862 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401020-kvfxw"] Nov 25 09:45:05 crc kubenswrapper[4687]: I1125 09:45:05.744655 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c01bb95-1d19-435a-9090-da58d2110922" path="/var/lib/kubelet/pods/3c01bb95-1d19-435a-9090-da58d2110922/volumes" Nov 25 09:45:10 crc kubenswrapper[4687]: I1125 09:45:10.735035 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:45:10 crc kubenswrapper[4687]: E1125 09:45:10.735963 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:45:11 crc kubenswrapper[4687]: I1125 09:45:11.787523 4687 generic.go:334] "Generic (PLEG): container finished" podID="7cf72d64-3a5f-42c4-a290-2244169a8a60" containerID="3dc7c34e93921c902a6262c55e25be0fa496aa86af0346693b07ab3b54cac08f" exitCode=0 Nov 25 09:45:11 crc kubenswrapper[4687]: I1125 09:45:11.787682 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" event={"ID":"7cf72d64-3a5f-42c4-a290-2244169a8a60","Type":"ContainerDied","Data":"3dc7c34e93921c902a6262c55e25be0fa496aa86af0346693b07ab3b54cac08f"} Nov 25 
09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.200883 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.291663 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-libvirt-combined-ca-bundle\") pod \"7cf72d64-3a5f-42c4-a290-2244169a8a60\" (UID: \"7cf72d64-3a5f-42c4-a290-2244169a8a60\") " Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.291705 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-libvirt-secret-0\") pod \"7cf72d64-3a5f-42c4-a290-2244169a8a60\" (UID: \"7cf72d64-3a5f-42c4-a290-2244169a8a60\") " Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.292455 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cm74q\" (UniqueName: \"kubernetes.io/projected/7cf72d64-3a5f-42c4-a290-2244169a8a60-kube-api-access-cm74q\") pod \"7cf72d64-3a5f-42c4-a290-2244169a8a60\" (UID: \"7cf72d64-3a5f-42c4-a290-2244169a8a60\") " Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.292692 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-inventory\") pod \"7cf72d64-3a5f-42c4-a290-2244169a8a60\" (UID: \"7cf72d64-3a5f-42c4-a290-2244169a8a60\") " Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.292764 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-ssh-key\") pod \"7cf72d64-3a5f-42c4-a290-2244169a8a60\" (UID: \"7cf72d64-3a5f-42c4-a290-2244169a8a60\") " Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.296845 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7cf72d64-3a5f-42c4-a290-2244169a8a60-kube-api-access-cm74q" (OuterVolumeSpecName: "kube-api-access-cm74q") pod "7cf72d64-3a5f-42c4-a290-2244169a8a60" (UID: "7cf72d64-3a5f-42c4-a290-2244169a8a60"). InnerVolumeSpecName "kube-api-access-cm74q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.297075 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "7cf72d64-3a5f-42c4-a290-2244169a8a60" (UID: "7cf72d64-3a5f-42c4-a290-2244169a8a60"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.318032 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "7cf72d64-3a5f-42c4-a290-2244169a8a60" (UID: "7cf72d64-3a5f-42c4-a290-2244169a8a60"). InnerVolumeSpecName "libvirt-secret-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.320708 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-inventory" (OuterVolumeSpecName: "inventory") pod "7cf72d64-3a5f-42c4-a290-2244169a8a60" (UID: "7cf72d64-3a5f-42c4-a290-2244169a8a60"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.322319 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7cf72d64-3a5f-42c4-a290-2244169a8a60" (UID: "7cf72d64-3a5f-42c4-a290-2244169a8a60"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.395832 4687 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.395869 4687 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.395882 4687 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.395898 4687 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/7cf72d64-3a5f-42c4-a290-2244169a8a60-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.395911 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cm74q\" (UniqueName: \"kubernetes.io/projected/7cf72d64-3a5f-42c4-a290-2244169a8a60-kube-api-access-cm74q\") on node \"crc\" DevicePath \"\"" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.805332 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" event={"ID":"7cf72d64-3a5f-42c4-a290-2244169a8a60","Type":"ContainerDied","Data":"e76b90cd6798e3117c1b775622cb37a7f145453d907064ba6e4700582f1e3cb8"} Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.805378 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e76b90cd6798e3117c1b775622cb37a7f145453d907064ba6e4700582f1e3cb8" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.805431 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.904861 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx"] Nov 25 09:45:13 crc kubenswrapper[4687]: E1125 09:45:13.905549 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cf72d64-3a5f-42c4-a290-2244169a8a60" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.905581 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cf72d64-3a5f-42c4-a290-2244169a8a60" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 09:45:13 crc kubenswrapper[4687]: E1125 09:45:13.905613 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b90b5d8-a050-44f2-bb81-d17468571fcc" containerName="collect-profiles" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.905624 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b90b5d8-a050-44f2-bb81-d17468571fcc" containerName="collect-profiles" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.905977 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="7cf72d64-3a5f-42c4-a290-2244169a8a60" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.906017 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b90b5d8-a050-44f2-bb81-d17468571fcc" containerName="collect-profiles" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.907088 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.909312 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.909646 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.909782 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-44nct" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.911413 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.911779 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.911642 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.912113 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 09:45:13 crc kubenswrapper[4687]: I1125 09:45:13.938728 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx"] Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.007971 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: 
\"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.008026 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.008114 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/74866639-8460-4684-afe9-2e19c59db722-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.008169 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.008232 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.008266 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.008289 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmzr6\" (UniqueName: \"kubernetes.io/projected/74866639-8460-4684-afe9-2e19c59db722-kube-api-access-fmzr6\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.008316 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.008398 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: 
\"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.110235 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.110300 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.110331 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.110397 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/74866639-8460-4684-afe9-2e19c59db722-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.110448 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.110536 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.110575 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.110601 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmzr6\" (UniqueName: 
\"kubernetes.io/projected/74866639-8460-4684-afe9-2e19c59db722-kube-api-access-fmzr6\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.110665 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.111523 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/74866639-8460-4684-afe9-2e19c59db722-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.114041 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.114935 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.115002 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.116313 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.117351 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.117896 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: 
\"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.118890 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.131875 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmzr6\" (UniqueName: \"kubernetes.io/projected/74866639-8460-4684-afe9-2e19c59db722-kube-api-access-fmzr6\") pod \"nova-edpm-deployment-openstack-edpm-ipam-lvpfx\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.223737 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.714555 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx"] Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.720023 4687 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:45:14 crc kubenswrapper[4687]: I1125 09:45:14.814955 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" event={"ID":"74866639-8460-4684-afe9-2e19c59db722","Type":"ContainerStarted","Data":"6e52708a31f973cdcc7b944cd59f87e3988f87e4d103e49eb28bf498fae63a2b"} Nov 25 09:45:16 crc kubenswrapper[4687]: I1125 09:45:16.833466 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" event={"ID":"74866639-8460-4684-afe9-2e19c59db722","Type":"ContainerStarted","Data":"1cd095328546335212b5b75475fb44aac9b133b618846f2b73b19b43756231a9"} Nov 25 09:45:16 crc kubenswrapper[4687]: I1125 09:45:16.853833 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" podStartSLOduration=2.908332215 podStartE2EDuration="3.853815961s" podCreationTimestamp="2025-11-25 09:45:13 +0000 UTC" firstStartedPulling="2025-11-25 09:45:14.719783177 +0000 UTC m=+2509.773422895" lastFinishedPulling="2025-11-25 09:45:15.665266923 +0000 UTC m=+2510.718906641" observedRunningTime="2025-11-25 09:45:16.85306491 +0000 UTC m=+2511.906704628" watchObservedRunningTime="2025-11-25 09:45:16.853815961 +0000 UTC m=+2511.907455679" Nov 25 09:45:23 crc kubenswrapper[4687]: I1125 09:45:23.735476 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:45:23 crc kubenswrapper[4687]: E1125 09:45:23.736883 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:45:38 crc kubenswrapper[4687]: I1125 
09:45:38.354541 4687 scope.go:117] "RemoveContainer" containerID="133002c858286be0689e24f27dbf1e85be50b37602392c9a2138d17f16040629" Nov 25 09:45:38 crc kubenswrapper[4687]: I1125 09:45:38.734786 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:45:38 crc kubenswrapper[4687]: E1125 09:45:38.735488 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:45:52 crc kubenswrapper[4687]: I1125 09:45:52.734610 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:45:52 crc kubenswrapper[4687]: E1125 09:45:52.735441 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:46:05 crc kubenswrapper[4687]: I1125 09:46:05.749749 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:46:05 crc kubenswrapper[4687]: E1125 09:46:05.750493 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:46:17 crc kubenswrapper[4687]: I1125 09:46:17.734956 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:46:17 crc kubenswrapper[4687]: E1125 09:46:17.735946 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:46:31 crc kubenswrapper[4687]: I1125 09:46:31.734756 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:46:31 crc kubenswrapper[4687]: E1125 09:46:31.736450 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:46:45 crc kubenswrapper[4687]: I1125 09:46:45.740448 
4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:46:45 crc kubenswrapper[4687]: E1125 09:46:45.741372 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:46:57 crc kubenswrapper[4687]: I1125 09:46:57.736016 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:46:57 crc kubenswrapper[4687]: E1125 09:46:57.736831 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:47:10 crc kubenswrapper[4687]: I1125 09:47:10.734399 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:47:10 crc kubenswrapper[4687]: E1125 09:47:10.736053 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:47:10 crc kubenswrapper[4687]: I1125 09:47:10.890532 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mfr9q"] Nov 25 09:47:10 crc kubenswrapper[4687]: I1125 09:47:10.892360 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mfr9q" Nov 25 09:47:10 crc kubenswrapper[4687]: I1125 09:47:10.913425 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mfr9q"] Nov 25 09:47:10 crc kubenswrapper[4687]: I1125 09:47:10.956365 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khfpj\" (UniqueName: \"kubernetes.io/projected/6bf15d66-3124-4075-acc1-862cb294a9ae-kube-api-access-khfpj\") pod \"community-operators-mfr9q\" (UID: \"6bf15d66-3124-4075-acc1-862cb294a9ae\") " pod="openshift-marketplace/community-operators-mfr9q" Nov 25 09:47:10 crc kubenswrapper[4687]: I1125 09:47:10.956539 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bf15d66-3124-4075-acc1-862cb294a9ae-utilities\") pod \"community-operators-mfr9q\" (UID: \"6bf15d66-3124-4075-acc1-862cb294a9ae\") " pod="openshift-marketplace/community-operators-mfr9q" Nov 25 09:47:10 crc kubenswrapper[4687]: I1125 09:47:10.956573 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bf15d66-3124-4075-acc1-862cb294a9ae-catalog-content\") pod \"community-operators-mfr9q\" (UID: \"6bf15d66-3124-4075-acc1-862cb294a9ae\") " pod="openshift-marketplace/community-operators-mfr9q" Nov 25 09:47:11 crc kubenswrapper[4687]: I1125 09:47:11.058064 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bf15d66-3124-4075-acc1-862cb294a9ae-utilities\") pod \"community-operators-mfr9q\" (UID: \"6bf15d66-3124-4075-acc1-862cb294a9ae\") " pod="openshift-marketplace/community-operators-mfr9q" Nov 25 09:47:11 crc kubenswrapper[4687]: I1125 09:47:11.058126 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bf15d66-3124-4075-acc1-862cb294a9ae-catalog-content\") pod \"community-operators-mfr9q\" (UID: \"6bf15d66-3124-4075-acc1-862cb294a9ae\") " pod="openshift-marketplace/community-operators-mfr9q" Nov 25 09:47:11 crc kubenswrapper[4687]: I1125 09:47:11.058270 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khfpj\" (UniqueName: \"kubernetes.io/projected/6bf15d66-3124-4075-acc1-862cb294a9ae-kube-api-access-khfpj\") pod \"community-operators-mfr9q\" (UID: \"6bf15d66-3124-4075-acc1-862cb294a9ae\") " pod="openshift-marketplace/community-operators-mfr9q" Nov 25 09:47:11 crc kubenswrapper[4687]: I1125 09:47:11.059157 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bf15d66-3124-4075-acc1-862cb294a9ae-catalog-content\") pod \"community-operators-mfr9q\" (UID: \"6bf15d66-3124-4075-acc1-862cb294a9ae\") " pod="openshift-marketplace/community-operators-mfr9q" Nov 25 09:47:11 crc kubenswrapper[4687]: I1125 09:47:11.059244 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bf15d66-3124-4075-acc1-862cb294a9ae-utilities\") pod \"community-operators-mfr9q\" (UID: \"6bf15d66-3124-4075-acc1-862cb294a9ae\") " pod="openshift-marketplace/community-operators-mfr9q" Nov 25 09:47:11 crc kubenswrapper[4687]: I1125 09:47:11.078928 4687 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-khfpj\" (UniqueName: \"kubernetes.io/projected/6bf15d66-3124-4075-acc1-862cb294a9ae-kube-api-access-khfpj\") pod \"community-operators-mfr9q\" (UID: \"6bf15d66-3124-4075-acc1-862cb294a9ae\") " pod="openshift-marketplace/community-operators-mfr9q" Nov 25 09:47:11 crc kubenswrapper[4687]: I1125 09:47:11.214702 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mfr9q" Nov 25 09:47:11 crc kubenswrapper[4687]: I1125 09:47:11.733091 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mfr9q"] Nov 25 09:47:12 crc kubenswrapper[4687]: I1125 09:47:12.255367 4687 generic.go:334] "Generic (PLEG): container finished" podID="6bf15d66-3124-4075-acc1-862cb294a9ae" containerID="530c61b52d1bd736e5d4cfe90df4e67a78886dd2989d5a9261507341610d57fe" exitCode=0 Nov 25 09:47:12 crc kubenswrapper[4687]: I1125 09:47:12.255446 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mfr9q" event={"ID":"6bf15d66-3124-4075-acc1-862cb294a9ae","Type":"ContainerDied","Data":"530c61b52d1bd736e5d4cfe90df4e67a78886dd2989d5a9261507341610d57fe"} Nov 25 09:47:12 crc kubenswrapper[4687]: I1125 09:47:12.255729 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mfr9q" event={"ID":"6bf15d66-3124-4075-acc1-862cb294a9ae","Type":"ContainerStarted","Data":"5561421cb39eea5cbaf58321e7bdbdf41e5fe3345a35426b8aff2afd1631cd9c"} Nov 25 09:47:13 crc kubenswrapper[4687]: I1125 09:47:13.265367 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mfr9q" event={"ID":"6bf15d66-3124-4075-acc1-862cb294a9ae","Type":"ContainerStarted","Data":"b451f9d3adaab77ae6bdf7305887057ce85cb14cc5be004a28ff57e048a84523"} Nov 25 09:47:13 crc kubenswrapper[4687]: I1125 09:47:13.472454 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lrftb"] Nov 25 09:47:13 crc kubenswrapper[4687]: I1125 09:47:13.474334 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lrftb" Nov 25 09:47:13 crc kubenswrapper[4687]: I1125 09:47:13.482007 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lrftb"] Nov 25 09:47:13 crc kubenswrapper[4687]: I1125 09:47:13.611150 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4aab2c0c-5aa9-4f81-8a1b-71f75d203709-utilities\") pod \"redhat-operators-lrftb\" (UID: \"4aab2c0c-5aa9-4f81-8a1b-71f75d203709\") " pod="openshift-marketplace/redhat-operators-lrftb" Nov 25 09:47:13 crc kubenswrapper[4687]: I1125 09:47:13.611407 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-crf7r\" (UniqueName: \"kubernetes.io/projected/4aab2c0c-5aa9-4f81-8a1b-71f75d203709-kube-api-access-crf7r\") pod \"redhat-operators-lrftb\" (UID: \"4aab2c0c-5aa9-4f81-8a1b-71f75d203709\") " pod="openshift-marketplace/redhat-operators-lrftb" Nov 25 09:47:13 crc kubenswrapper[4687]: I1125 09:47:13.611577 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4aab2c0c-5aa9-4f81-8a1b-71f75d203709-catalog-content\") pod \"redhat-operators-lrftb\" (UID: \"4aab2c0c-5aa9-4f81-8a1b-71f75d203709\") " pod="openshift-marketplace/redhat-operators-lrftb" Nov 25 09:47:13 crc kubenswrapper[4687]: I1125 09:47:13.713589 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4aab2c0c-5aa9-4f81-8a1b-71f75d203709-utilities\") pod \"redhat-operators-lrftb\" (UID: \"4aab2c0c-5aa9-4f81-8a1b-71f75d203709\") " pod="openshift-marketplace/redhat-operators-lrftb" Nov 25 09:47:13 crc kubenswrapper[4687]: I1125 09:47:13.713694 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-crf7r\" (UniqueName: \"kubernetes.io/projected/4aab2c0c-5aa9-4f81-8a1b-71f75d203709-kube-api-access-crf7r\") pod \"redhat-operators-lrftb\" (UID: \"4aab2c0c-5aa9-4f81-8a1b-71f75d203709\") " pod="openshift-marketplace/redhat-operators-lrftb" Nov 25 09:47:13 crc kubenswrapper[4687]: I1125 09:47:13.713757 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4aab2c0c-5aa9-4f81-8a1b-71f75d203709-catalog-content\") pod \"redhat-operators-lrftb\" (UID: \"4aab2c0c-5aa9-4f81-8a1b-71f75d203709\") " pod="openshift-marketplace/redhat-operators-lrftb" Nov 25 09:47:13 crc kubenswrapper[4687]: I1125 09:47:13.714222 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4aab2c0c-5aa9-4f81-8a1b-71f75d203709-utilities\") pod \"redhat-operators-lrftb\" (UID: \"4aab2c0c-5aa9-4f81-8a1b-71f75d203709\") " pod="openshift-marketplace/redhat-operators-lrftb" Nov 25 09:47:13 crc kubenswrapper[4687]: I1125 09:47:13.714355 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4aab2c0c-5aa9-4f81-8a1b-71f75d203709-catalog-content\") pod \"redhat-operators-lrftb\" (UID: \"4aab2c0c-5aa9-4f81-8a1b-71f75d203709\") " pod="openshift-marketplace/redhat-operators-lrftb" Nov 25 09:47:13 crc kubenswrapper[4687]: I1125 09:47:13.737180 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-crf7r\" (UniqueName: \"kubernetes.io/projected/4aab2c0c-5aa9-4f81-8a1b-71f75d203709-kube-api-access-crf7r\") pod \"redhat-operators-lrftb\" (UID: \"4aab2c0c-5aa9-4f81-8a1b-71f75d203709\") " pod="openshift-marketplace/redhat-operators-lrftb" Nov 25 09:47:13 crc kubenswrapper[4687]: I1125 09:47:13.797428 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lrftb" Nov 25 09:47:14 crc kubenswrapper[4687]: W1125 09:47:14.253281 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4aab2c0c_5aa9_4f81_8a1b_71f75d203709.slice/crio-7fb78eb909e34a55c7f87b25efb0f1c544445e973f1b84dc552da2c0651e75ee WatchSource:0}: Error finding container 7fb78eb909e34a55c7f87b25efb0f1c544445e973f1b84dc552da2c0651e75ee: Status 404 returned error can't find the container with id 7fb78eb909e34a55c7f87b25efb0f1c544445e973f1b84dc552da2c0651e75ee Nov 25 09:47:14 crc kubenswrapper[4687]: I1125 09:47:14.255584 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lrftb"] Nov 25 09:47:14 crc kubenswrapper[4687]: I1125 09:47:14.275936 4687 generic.go:334] "Generic (PLEG): container finished" podID="6bf15d66-3124-4075-acc1-862cb294a9ae" containerID="b451f9d3adaab77ae6bdf7305887057ce85cb14cc5be004a28ff57e048a84523" exitCode=0 Nov 25 09:47:14 crc kubenswrapper[4687]: I1125 09:47:14.275998 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mfr9q" event={"ID":"6bf15d66-3124-4075-acc1-862cb294a9ae","Type":"ContainerDied","Data":"b451f9d3adaab77ae6bdf7305887057ce85cb14cc5be004a28ff57e048a84523"} Nov 25 09:47:14 crc kubenswrapper[4687]: I1125 09:47:14.278778 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lrftb" event={"ID":"4aab2c0c-5aa9-4f81-8a1b-71f75d203709","Type":"ContainerStarted","Data":"7fb78eb909e34a55c7f87b25efb0f1c544445e973f1b84dc552da2c0651e75ee"} Nov 25 09:47:15 crc kubenswrapper[4687]: I1125 09:47:15.293794 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mfr9q" event={"ID":"6bf15d66-3124-4075-acc1-862cb294a9ae","Type":"ContainerStarted","Data":"ad81aabe0eddac518739b7dc7d0dec0cea1ea728b32b1967755d8b0138d84a16"} Nov 25 09:47:15 crc kubenswrapper[4687]: I1125 09:47:15.297178 4687 generic.go:334] "Generic (PLEG): container finished" podID="4aab2c0c-5aa9-4f81-8a1b-71f75d203709" containerID="5e4514f202394d06b082c533e5898833646c2a132929c77b0da238e4287b5abd" exitCode=0 Nov 25 09:47:15 crc kubenswrapper[4687]: I1125 09:47:15.297229 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lrftb" event={"ID":"4aab2c0c-5aa9-4f81-8a1b-71f75d203709","Type":"ContainerDied","Data":"5e4514f202394d06b082c533e5898833646c2a132929c77b0da238e4287b5abd"} Nov 25 09:47:15 crc kubenswrapper[4687]: I1125 09:47:15.319854 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mfr9q" podStartSLOduration=2.924941678 podStartE2EDuration="5.319832418s" podCreationTimestamp="2025-11-25 09:47:10 +0000 UTC" firstStartedPulling="2025-11-25 09:47:12.257393399 +0000 UTC m=+2627.311033117" lastFinishedPulling="2025-11-25 09:47:14.652284139 +0000 UTC m=+2629.705923857" observedRunningTime="2025-11-25 09:47:15.314939609 +0000 UTC m=+2630.368579327" watchObservedRunningTime="2025-11-25 
09:47:15.319832418 +0000 UTC m=+2630.373472126" Nov 25 09:47:17 crc kubenswrapper[4687]: I1125 09:47:17.317986 4687 generic.go:334] "Generic (PLEG): container finished" podID="4aab2c0c-5aa9-4f81-8a1b-71f75d203709" containerID="8c13bc7e6225cdd7993584b5424ebe2c797a8730241c7b14b6afbcf4bd9754e8" exitCode=0 Nov 25 09:47:17 crc kubenswrapper[4687]: I1125 09:47:17.318050 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lrftb" event={"ID":"4aab2c0c-5aa9-4f81-8a1b-71f75d203709","Type":"ContainerDied","Data":"8c13bc7e6225cdd7993584b5424ebe2c797a8730241c7b14b6afbcf4bd9754e8"} Nov 25 09:47:18 crc kubenswrapper[4687]: I1125 09:47:18.332044 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lrftb" event={"ID":"4aab2c0c-5aa9-4f81-8a1b-71f75d203709","Type":"ContainerStarted","Data":"8bf6d71f55b392371e8c9eb8300f5f8439c3b7162edd65993053f5170f4e55a6"} Nov 25 09:47:18 crc kubenswrapper[4687]: I1125 09:47:18.354612 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lrftb" podStartSLOduration=2.938979942 podStartE2EDuration="5.354592568s" podCreationTimestamp="2025-11-25 09:47:13 +0000 UTC" firstStartedPulling="2025-11-25 09:47:15.299861742 +0000 UTC m=+2630.353501480" lastFinishedPulling="2025-11-25 09:47:17.715474388 +0000 UTC m=+2632.769114106" observedRunningTime="2025-11-25 09:47:18.348856836 +0000 UTC m=+2633.402496554" watchObservedRunningTime="2025-11-25 09:47:18.354592568 +0000 UTC m=+2633.408232286" Nov 25 09:47:21 crc kubenswrapper[4687]: I1125 09:47:21.215406 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mfr9q" Nov 25 09:47:21 crc kubenswrapper[4687]: I1125 09:47:21.219039 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mfr9q" Nov 25 09:47:21 crc kubenswrapper[4687]: I1125 09:47:21.594026 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mfr9q" Nov 25 09:47:22 crc kubenswrapper[4687]: I1125 09:47:22.608493 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mfr9q" Nov 25 09:47:22 crc kubenswrapper[4687]: I1125 09:47:22.659099 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mfr9q"] Nov 25 09:47:23 crc kubenswrapper[4687]: I1125 09:47:23.798472 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lrftb" Nov 25 09:47:23 crc kubenswrapper[4687]: I1125 09:47:23.798599 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lrftb" Nov 25 09:47:24 crc kubenswrapper[4687]: I1125 09:47:24.576975 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mfr9q" podUID="6bf15d66-3124-4075-acc1-862cb294a9ae" containerName="registry-server" containerID="cri-o://ad81aabe0eddac518739b7dc7d0dec0cea1ea728b32b1967755d8b0138d84a16" gracePeriod=2 Nov 25 09:47:24 crc kubenswrapper[4687]: I1125 09:47:24.852534 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-lrftb" podUID="4aab2c0c-5aa9-4f81-8a1b-71f75d203709" containerName="registry-server" probeResult="failure" output=< Nov 25 09:47:24 crc 
kubenswrapper[4687]: timeout: failed to connect service ":50051" within 1s Nov 25 09:47:24 crc kubenswrapper[4687]: > Nov 25 09:47:25 crc kubenswrapper[4687]: I1125 09:47:25.741312 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:47:25 crc kubenswrapper[4687]: E1125 09:47:25.741941 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:47:26 crc kubenswrapper[4687]: I1125 09:47:26.595607 4687 generic.go:334] "Generic (PLEG): container finished" podID="6bf15d66-3124-4075-acc1-862cb294a9ae" containerID="ad81aabe0eddac518739b7dc7d0dec0cea1ea728b32b1967755d8b0138d84a16" exitCode=0 Nov 25 09:47:26 crc kubenswrapper[4687]: I1125 09:47:26.595669 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mfr9q" event={"ID":"6bf15d66-3124-4075-acc1-862cb294a9ae","Type":"ContainerDied","Data":"ad81aabe0eddac518739b7dc7d0dec0cea1ea728b32b1967755d8b0138d84a16"} Nov 25 09:47:27 crc kubenswrapper[4687]: I1125 09:47:27.044521 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mfr9q" Nov 25 09:47:27 crc kubenswrapper[4687]: I1125 09:47:27.142865 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-khfpj\" (UniqueName: \"kubernetes.io/projected/6bf15d66-3124-4075-acc1-862cb294a9ae-kube-api-access-khfpj\") pod \"6bf15d66-3124-4075-acc1-862cb294a9ae\" (UID: \"6bf15d66-3124-4075-acc1-862cb294a9ae\") " Nov 25 09:47:27 crc kubenswrapper[4687]: I1125 09:47:27.142947 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bf15d66-3124-4075-acc1-862cb294a9ae-utilities\") pod \"6bf15d66-3124-4075-acc1-862cb294a9ae\" (UID: \"6bf15d66-3124-4075-acc1-862cb294a9ae\") " Nov 25 09:47:27 crc kubenswrapper[4687]: I1125 09:47:27.143144 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bf15d66-3124-4075-acc1-862cb294a9ae-catalog-content\") pod \"6bf15d66-3124-4075-acc1-862cb294a9ae\" (UID: \"6bf15d66-3124-4075-acc1-862cb294a9ae\") " Nov 25 09:47:27 crc kubenswrapper[4687]: I1125 09:47:27.145132 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6bf15d66-3124-4075-acc1-862cb294a9ae-utilities" (OuterVolumeSpecName: "utilities") pod "6bf15d66-3124-4075-acc1-862cb294a9ae" (UID: "6bf15d66-3124-4075-acc1-862cb294a9ae"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:47:27 crc kubenswrapper[4687]: I1125 09:47:27.146566 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bf15d66-3124-4075-acc1-862cb294a9ae-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:27 crc kubenswrapper[4687]: I1125 09:47:27.165607 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6bf15d66-3124-4075-acc1-862cb294a9ae-kube-api-access-khfpj" (OuterVolumeSpecName: "kube-api-access-khfpj") pod "6bf15d66-3124-4075-acc1-862cb294a9ae" (UID: "6bf15d66-3124-4075-acc1-862cb294a9ae"). InnerVolumeSpecName "kube-api-access-khfpj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:47:27 crc kubenswrapper[4687]: I1125 09:47:27.197709 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6bf15d66-3124-4075-acc1-862cb294a9ae-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6bf15d66-3124-4075-acc1-862cb294a9ae" (UID: "6bf15d66-3124-4075-acc1-862cb294a9ae"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:47:27 crc kubenswrapper[4687]: I1125 09:47:27.248810 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bf15d66-3124-4075-acc1-862cb294a9ae-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:27 crc kubenswrapper[4687]: I1125 09:47:27.248883 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-khfpj\" (UniqueName: \"kubernetes.io/projected/6bf15d66-3124-4075-acc1-862cb294a9ae-kube-api-access-khfpj\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:27 crc kubenswrapper[4687]: I1125 09:47:27.607082 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mfr9q" event={"ID":"6bf15d66-3124-4075-acc1-862cb294a9ae","Type":"ContainerDied","Data":"5561421cb39eea5cbaf58321e7bdbdf41e5fe3345a35426b8aff2afd1631cd9c"} Nov 25 09:47:27 crc kubenswrapper[4687]: I1125 09:47:27.607164 4687 scope.go:117] "RemoveContainer" containerID="ad81aabe0eddac518739b7dc7d0dec0cea1ea728b32b1967755d8b0138d84a16" Nov 25 09:47:27 crc kubenswrapper[4687]: I1125 09:47:27.607188 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mfr9q" Nov 25 09:47:27 crc kubenswrapper[4687]: I1125 09:47:27.640083 4687 scope.go:117] "RemoveContainer" containerID="b451f9d3adaab77ae6bdf7305887057ce85cb14cc5be004a28ff57e048a84523" Nov 25 09:47:27 crc kubenswrapper[4687]: I1125 09:47:27.648775 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mfr9q"] Nov 25 09:47:27 crc kubenswrapper[4687]: I1125 09:47:27.658182 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mfr9q"] Nov 25 09:47:27 crc kubenswrapper[4687]: I1125 09:47:27.671707 4687 scope.go:117] "RemoveContainer" containerID="530c61b52d1bd736e5d4cfe90df4e67a78886dd2989d5a9261507341610d57fe" Nov 25 09:47:27 crc kubenswrapper[4687]: I1125 09:47:27.747876 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6bf15d66-3124-4075-acc1-862cb294a9ae" path="/var/lib/kubelet/pods/6bf15d66-3124-4075-acc1-862cb294a9ae/volumes" Nov 25 09:47:33 crc kubenswrapper[4687]: I1125 09:47:33.854366 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lrftb" Nov 25 09:47:33 crc kubenswrapper[4687]: I1125 09:47:33.906071 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lrftb" Nov 25 09:47:34 crc kubenswrapper[4687]: I1125 09:47:34.090454 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lrftb"] Nov 25 09:47:35 crc kubenswrapper[4687]: I1125 09:47:35.671226 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-lrftb" podUID="4aab2c0c-5aa9-4f81-8a1b-71f75d203709" containerName="registry-server" containerID="cri-o://8bf6d71f55b392371e8c9eb8300f5f8439c3b7162edd65993053f5170f4e55a6" gracePeriod=2 Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.147921 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lrftb" Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.222729 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4aab2c0c-5aa9-4f81-8a1b-71f75d203709-utilities\") pod \"4aab2c0c-5aa9-4f81-8a1b-71f75d203709\" (UID: \"4aab2c0c-5aa9-4f81-8a1b-71f75d203709\") " Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.222923 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-crf7r\" (UniqueName: \"kubernetes.io/projected/4aab2c0c-5aa9-4f81-8a1b-71f75d203709-kube-api-access-crf7r\") pod \"4aab2c0c-5aa9-4f81-8a1b-71f75d203709\" (UID: \"4aab2c0c-5aa9-4f81-8a1b-71f75d203709\") " Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.223154 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4aab2c0c-5aa9-4f81-8a1b-71f75d203709-catalog-content\") pod \"4aab2c0c-5aa9-4f81-8a1b-71f75d203709\" (UID: \"4aab2c0c-5aa9-4f81-8a1b-71f75d203709\") " Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.223664 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4aab2c0c-5aa9-4f81-8a1b-71f75d203709-utilities" (OuterVolumeSpecName: "utilities") pod "4aab2c0c-5aa9-4f81-8a1b-71f75d203709" (UID: "4aab2c0c-5aa9-4f81-8a1b-71f75d203709"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.232767 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4aab2c0c-5aa9-4f81-8a1b-71f75d203709-kube-api-access-crf7r" (OuterVolumeSpecName: "kube-api-access-crf7r") pod "4aab2c0c-5aa9-4f81-8a1b-71f75d203709" (UID: "4aab2c0c-5aa9-4f81-8a1b-71f75d203709"). InnerVolumeSpecName "kube-api-access-crf7r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.319883 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4aab2c0c-5aa9-4f81-8a1b-71f75d203709-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4aab2c0c-5aa9-4f81-8a1b-71f75d203709" (UID: "4aab2c0c-5aa9-4f81-8a1b-71f75d203709"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.325411 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4aab2c0c-5aa9-4f81-8a1b-71f75d203709-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.325446 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4aab2c0c-5aa9-4f81-8a1b-71f75d203709-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.325455 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-crf7r\" (UniqueName: \"kubernetes.io/projected/4aab2c0c-5aa9-4f81-8a1b-71f75d203709-kube-api-access-crf7r\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.681990 4687 generic.go:334] "Generic (PLEG): container finished" podID="4aab2c0c-5aa9-4f81-8a1b-71f75d203709" containerID="8bf6d71f55b392371e8c9eb8300f5f8439c3b7162edd65993053f5170f4e55a6" exitCode=0 Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.682040 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lrftb" event={"ID":"4aab2c0c-5aa9-4f81-8a1b-71f75d203709","Type":"ContainerDied","Data":"8bf6d71f55b392371e8c9eb8300f5f8439c3b7162edd65993053f5170f4e55a6"} Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.682084 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lrftb" event={"ID":"4aab2c0c-5aa9-4f81-8a1b-71f75d203709","Type":"ContainerDied","Data":"7fb78eb909e34a55c7f87b25efb0f1c544445e973f1b84dc552da2c0651e75ee"} Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.682049 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lrftb" Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.682109 4687 scope.go:117] "RemoveContainer" containerID="8bf6d71f55b392371e8c9eb8300f5f8439c3b7162edd65993053f5170f4e55a6" Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.718343 4687 scope.go:117] "RemoveContainer" containerID="8c13bc7e6225cdd7993584b5424ebe2c797a8730241c7b14b6afbcf4bd9754e8" Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.757633 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lrftb"] Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.758218 4687 scope.go:117] "RemoveContainer" containerID="5e4514f202394d06b082c533e5898833646c2a132929c77b0da238e4287b5abd" Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.766593 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-lrftb"] Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.789090 4687 scope.go:117] "RemoveContainer" containerID="8bf6d71f55b392371e8c9eb8300f5f8439c3b7162edd65993053f5170f4e55a6" Nov 25 09:47:36 crc kubenswrapper[4687]: E1125 09:47:36.789551 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8bf6d71f55b392371e8c9eb8300f5f8439c3b7162edd65993053f5170f4e55a6\": container with ID starting with 8bf6d71f55b392371e8c9eb8300f5f8439c3b7162edd65993053f5170f4e55a6 not found: ID does not exist" containerID="8bf6d71f55b392371e8c9eb8300f5f8439c3b7162edd65993053f5170f4e55a6" Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.789583 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bf6d71f55b392371e8c9eb8300f5f8439c3b7162edd65993053f5170f4e55a6"} err="failed to get container status \"8bf6d71f55b392371e8c9eb8300f5f8439c3b7162edd65993053f5170f4e55a6\": rpc error: code = NotFound desc = could not find container \"8bf6d71f55b392371e8c9eb8300f5f8439c3b7162edd65993053f5170f4e55a6\": container with ID starting with 8bf6d71f55b392371e8c9eb8300f5f8439c3b7162edd65993053f5170f4e55a6 not found: ID does not exist" Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.789607 4687 scope.go:117] "RemoveContainer" containerID="8c13bc7e6225cdd7993584b5424ebe2c797a8730241c7b14b6afbcf4bd9754e8" Nov 25 09:47:36 crc kubenswrapper[4687]: E1125 09:47:36.789943 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c13bc7e6225cdd7993584b5424ebe2c797a8730241c7b14b6afbcf4bd9754e8\": container with ID starting with 8c13bc7e6225cdd7993584b5424ebe2c797a8730241c7b14b6afbcf4bd9754e8 not found: ID does not exist" containerID="8c13bc7e6225cdd7993584b5424ebe2c797a8730241c7b14b6afbcf4bd9754e8" Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.789966 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c13bc7e6225cdd7993584b5424ebe2c797a8730241c7b14b6afbcf4bd9754e8"} err="failed to get container status \"8c13bc7e6225cdd7993584b5424ebe2c797a8730241c7b14b6afbcf4bd9754e8\": rpc error: code = NotFound desc = could not find container \"8c13bc7e6225cdd7993584b5424ebe2c797a8730241c7b14b6afbcf4bd9754e8\": container with ID starting with 8c13bc7e6225cdd7993584b5424ebe2c797a8730241c7b14b6afbcf4bd9754e8 not found: ID does not exist" Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.789980 4687 scope.go:117] "RemoveContainer" 
containerID="5e4514f202394d06b082c533e5898833646c2a132929c77b0da238e4287b5abd" Nov 25 09:47:36 crc kubenswrapper[4687]: E1125 09:47:36.790344 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e4514f202394d06b082c533e5898833646c2a132929c77b0da238e4287b5abd\": container with ID starting with 5e4514f202394d06b082c533e5898833646c2a132929c77b0da238e4287b5abd not found: ID does not exist" containerID="5e4514f202394d06b082c533e5898833646c2a132929c77b0da238e4287b5abd" Nov 25 09:47:36 crc kubenswrapper[4687]: I1125 09:47:36.790366 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e4514f202394d06b082c533e5898833646c2a132929c77b0da238e4287b5abd"} err="failed to get container status \"5e4514f202394d06b082c533e5898833646c2a132929c77b0da238e4287b5abd\": rpc error: code = NotFound desc = could not find container \"5e4514f202394d06b082c533e5898833646c2a132929c77b0da238e4287b5abd\": container with ID starting with 5e4514f202394d06b082c533e5898833646c2a132929c77b0da238e4287b5abd not found: ID does not exist" Nov 25 09:47:37 crc kubenswrapper[4687]: I1125 09:47:37.746270 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4aab2c0c-5aa9-4f81-8a1b-71f75d203709" path="/var/lib/kubelet/pods/4aab2c0c-5aa9-4f81-8a1b-71f75d203709/volumes" Nov 25 09:47:40 crc kubenswrapper[4687]: I1125 09:47:40.735014 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:47:40 crc kubenswrapper[4687]: E1125 09:47:40.735613 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:47:55 crc kubenswrapper[4687]: I1125 09:47:55.743740 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:47:55 crc kubenswrapper[4687]: E1125 09:47:55.744744 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:47:59 crc kubenswrapper[4687]: I1125 09:47:59.903542 4687 generic.go:334] "Generic (PLEG): container finished" podID="74866639-8460-4684-afe9-2e19c59db722" containerID="1cd095328546335212b5b75475fb44aac9b133b618846f2b73b19b43756231a9" exitCode=0 Nov 25 09:47:59 crc kubenswrapper[4687]: I1125 09:47:59.904049 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" event={"ID":"74866639-8460-4684-afe9-2e19c59db722","Type":"ContainerDied","Data":"1cd095328546335212b5b75475fb44aac9b133b618846f2b73b19b43756231a9"} Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.403840 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.535672 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-migration-ssh-key-0\") pod \"74866639-8460-4684-afe9-2e19c59db722\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.536513 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-migration-ssh-key-1\") pod \"74866639-8460-4684-afe9-2e19c59db722\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.536656 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/74866639-8460-4684-afe9-2e19c59db722-nova-extra-config-0\") pod \"74866639-8460-4684-afe9-2e19c59db722\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.536716 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-inventory\") pod \"74866639-8460-4684-afe9-2e19c59db722\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.536742 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-combined-ca-bundle\") pod \"74866639-8460-4684-afe9-2e19c59db722\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.536760 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-cell1-compute-config-0\") pod \"74866639-8460-4684-afe9-2e19c59db722\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.536850 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-cell1-compute-config-1\") pod \"74866639-8460-4684-afe9-2e19c59db722\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.536875 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-ssh-key\") pod \"74866639-8460-4684-afe9-2e19c59db722\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.536903 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fmzr6\" (UniqueName: \"kubernetes.io/projected/74866639-8460-4684-afe9-2e19c59db722-kube-api-access-fmzr6\") pod \"74866639-8460-4684-afe9-2e19c59db722\" (UID: \"74866639-8460-4684-afe9-2e19c59db722\") " Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.543748 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/74866639-8460-4684-afe9-2e19c59db722-kube-api-access-fmzr6" (OuterVolumeSpecName: "kube-api-access-fmzr6") pod "74866639-8460-4684-afe9-2e19c59db722" (UID: "74866639-8460-4684-afe9-2e19c59db722"). InnerVolumeSpecName "kube-api-access-fmzr6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.544536 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "74866639-8460-4684-afe9-2e19c59db722" (UID: "74866639-8460-4684-afe9-2e19c59db722"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.566845 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-inventory" (OuterVolumeSpecName: "inventory") pod "74866639-8460-4684-afe9-2e19c59db722" (UID: "74866639-8460-4684-afe9-2e19c59db722"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.567285 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "74866639-8460-4684-afe9-2e19c59db722" (UID: "74866639-8460-4684-afe9-2e19c59db722"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.574483 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74866639-8460-4684-afe9-2e19c59db722-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "74866639-8460-4684-afe9-2e19c59db722" (UID: "74866639-8460-4684-afe9-2e19c59db722"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.574881 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "74866639-8460-4684-afe9-2e19c59db722" (UID: "74866639-8460-4684-afe9-2e19c59db722"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.576219 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "74866639-8460-4684-afe9-2e19c59db722" (UID: "74866639-8460-4684-afe9-2e19c59db722"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.577643 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "74866639-8460-4684-afe9-2e19c59db722" (UID: "74866639-8460-4684-afe9-2e19c59db722"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.585420 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "74866639-8460-4684-afe9-2e19c59db722" (UID: "74866639-8460-4684-afe9-2e19c59db722"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.639994 4687 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/74866639-8460-4684-afe9-2e19c59db722-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.640043 4687 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.640058 4687 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.640076 4687 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.640113 4687 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.640125 4687 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.640140 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fmzr6\" (UniqueName: \"kubernetes.io/projected/74866639-8460-4684-afe9-2e19c59db722-kube-api-access-fmzr6\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.640153 4687 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.640165 4687 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/74866639-8460-4684-afe9-2e19c59db722-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.930083 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" event={"ID":"74866639-8460-4684-afe9-2e19c59db722","Type":"ContainerDied","Data":"6e52708a31f973cdcc7b944cd59f87e3988f87e4d103e49eb28bf498fae63a2b"} Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.930131 4687 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="6e52708a31f973cdcc7b944cd59f87e3988f87e4d103e49eb28bf498fae63a2b" Nov 25 09:48:01 crc kubenswrapper[4687]: I1125 09:48:01.930147 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-lvpfx" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.035471 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq"] Nov 25 09:48:02 crc kubenswrapper[4687]: E1125 09:48:02.035987 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4aab2c0c-5aa9-4f81-8a1b-71f75d203709" containerName="extract-utilities" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.036010 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="4aab2c0c-5aa9-4f81-8a1b-71f75d203709" containerName="extract-utilities" Nov 25 09:48:02 crc kubenswrapper[4687]: E1125 09:48:02.036027 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4aab2c0c-5aa9-4f81-8a1b-71f75d203709" containerName="extract-content" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.036036 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="4aab2c0c-5aa9-4f81-8a1b-71f75d203709" containerName="extract-content" Nov 25 09:48:02 crc kubenswrapper[4687]: E1125 09:48:02.036050 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4aab2c0c-5aa9-4f81-8a1b-71f75d203709" containerName="registry-server" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.036059 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="4aab2c0c-5aa9-4f81-8a1b-71f75d203709" containerName="registry-server" Nov 25 09:48:02 crc kubenswrapper[4687]: E1125 09:48:02.036071 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bf15d66-3124-4075-acc1-862cb294a9ae" containerName="extract-utilities" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.036078 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bf15d66-3124-4075-acc1-862cb294a9ae" containerName="extract-utilities" Nov 25 09:48:02 crc kubenswrapper[4687]: E1125 09:48:02.036113 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bf15d66-3124-4075-acc1-862cb294a9ae" containerName="extract-content" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.036121 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bf15d66-3124-4075-acc1-862cb294a9ae" containerName="extract-content" Nov 25 09:48:02 crc kubenswrapper[4687]: E1125 09:48:02.036138 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74866639-8460-4684-afe9-2e19c59db722" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.036145 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="74866639-8460-4684-afe9-2e19c59db722" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 25 09:48:02 crc kubenswrapper[4687]: E1125 09:48:02.036158 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bf15d66-3124-4075-acc1-862cb294a9ae" containerName="registry-server" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.036166 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bf15d66-3124-4075-acc1-862cb294a9ae" containerName="registry-server" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.036374 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="6bf15d66-3124-4075-acc1-862cb294a9ae" containerName="registry-server" Nov 25 09:48:02 crc kubenswrapper[4687]: 
I1125 09:48:02.036401 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="4aab2c0c-5aa9-4f81-8a1b-71f75d203709" containerName="registry-server" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.036422 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="74866639-8460-4684-afe9-2e19c59db722" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.037595 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.040752 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.042465 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.042766 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.044767 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-44nct" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.044937 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.065904 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq"] Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.150619 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.150667 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.150696 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.150967 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") " 
pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.151025 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.151094 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.151274 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9lwk\" (UniqueName: \"kubernetes.io/projected/cc7503d0-7742-479f-94f2-d2fbffd48809-kube-api-access-q9lwk\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.253929 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9lwk\" (UniqueName: \"kubernetes.io/projected/cc7503d0-7742-479f-94f2-d2fbffd48809-kube-api-access-q9lwk\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.254041 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.254073 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.254103 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.254160 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-inventory\") pod 
\"telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.254193 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.254228 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.259378 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.260241 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.261097 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.261734 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.261832 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.262478 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ceilometer-compute-config-data-2\") pod 
\"telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.273550 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9lwk\" (UniqueName: \"kubernetes.io/projected/cc7503d0-7742-479f-94f2-d2fbffd48809-kube-api-access-q9lwk\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.366179 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:48:02 crc kubenswrapper[4687]: I1125 09:48:02.965149 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq"] Nov 25 09:48:03 crc kubenswrapper[4687]: I1125 09:48:03.946262 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" event={"ID":"cc7503d0-7742-479f-94f2-d2fbffd48809","Type":"ContainerStarted","Data":"8d5bc35dfd3c36b1ef8c65965272842b363fcf60d77fc3de7e2abca8ea38ce84"} Nov 25 09:48:04 crc kubenswrapper[4687]: I1125 09:48:04.958448 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" event={"ID":"cc7503d0-7742-479f-94f2-d2fbffd48809","Type":"ContainerStarted","Data":"c8a005581eb320f40c8ad0a27fde40cb6bb27e6b12dc6748583a8afbeee877cd"} Nov 25 09:48:04 crc kubenswrapper[4687]: I1125 09:48:04.985316 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" podStartSLOduration=2.228808207 podStartE2EDuration="2.985290871s" podCreationTimestamp="2025-11-25 09:48:02 +0000 UTC" firstStartedPulling="2025-11-25 09:48:02.970963294 +0000 UTC m=+2678.024603162" lastFinishedPulling="2025-11-25 09:48:03.727446088 +0000 UTC m=+2678.781085826" observedRunningTime="2025-11-25 09:48:04.977425124 +0000 UTC m=+2680.031064842" watchObservedRunningTime="2025-11-25 09:48:04.985290871 +0000 UTC m=+2680.038930589" Nov 25 09:48:10 crc kubenswrapper[4687]: I1125 09:48:10.736009 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:48:10 crc kubenswrapper[4687]: E1125 09:48:10.737448 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:48:25 crc kubenswrapper[4687]: I1125 09:48:25.757587 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:48:26 crc kubenswrapper[4687]: I1125 09:48:26.192207 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerStarted","Data":"a8a2a78700106a41080a4da283d63c1bfe9b146dc1fcdfe20f9e7b23579d1d20"} Nov 25 09:50:19 crc 
Nov 25 09:50:19 crc kubenswrapper[4687]: I1125 09:50:19.157740 4687 generic.go:334] "Generic (PLEG): container finished" podID="cc7503d0-7742-479f-94f2-d2fbffd48809" containerID="c8a005581eb320f40c8ad0a27fde40cb6bb27e6b12dc6748583a8afbeee877cd" exitCode=0
Nov 25 09:50:19 crc kubenswrapper[4687]: I1125 09:50:19.157848 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" event={"ID":"cc7503d0-7742-479f-94f2-d2fbffd48809","Type":"ContainerDied","Data":"c8a005581eb320f40c8ad0a27fde40cb6bb27e6b12dc6748583a8afbeee877cd"}
Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.561194 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq"
Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.651346 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ssh-key\") pod \"cc7503d0-7742-479f-94f2-d2fbffd48809\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") "
Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.651458 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ceilometer-compute-config-data-0\") pod \"cc7503d0-7742-479f-94f2-d2fbffd48809\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") "
Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.652336 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-inventory\") pod \"cc7503d0-7742-479f-94f2-d2fbffd48809\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") "
Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.652434 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ceilometer-compute-config-data-1\") pod \"cc7503d0-7742-479f-94f2-d2fbffd48809\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") "
Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.652455 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ceilometer-compute-config-data-2\") pod \"cc7503d0-7742-479f-94f2-d2fbffd48809\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") "
Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.652525 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-telemetry-combined-ca-bundle\") pod \"cc7503d0-7742-479f-94f2-d2fbffd48809\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") "
Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.652667 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q9lwk\" (UniqueName: \"kubernetes.io/projected/cc7503d0-7742-479f-94f2-d2fbffd48809-kube-api-access-q9lwk\") pod \"cc7503d0-7742-479f-94f2-d2fbffd48809\" (UID: \"cc7503d0-7742-479f-94f2-d2fbffd48809\") "
Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.659301 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume
"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "cc7503d0-7742-479f-94f2-d2fbffd48809" (UID: "cc7503d0-7742-479f-94f2-d2fbffd48809"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.659721 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc7503d0-7742-479f-94f2-d2fbffd48809-kube-api-access-q9lwk" (OuterVolumeSpecName: "kube-api-access-q9lwk") pod "cc7503d0-7742-479f-94f2-d2fbffd48809" (UID: "cc7503d0-7742-479f-94f2-d2fbffd48809"). InnerVolumeSpecName "kube-api-access-q9lwk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.686313 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "cc7503d0-7742-479f-94f2-d2fbffd48809" (UID: "cc7503d0-7742-479f-94f2-d2fbffd48809"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.686705 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "cc7503d0-7742-479f-94f2-d2fbffd48809" (UID: "cc7503d0-7742-479f-94f2-d2fbffd48809"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.686769 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "cc7503d0-7742-479f-94f2-d2fbffd48809" (UID: "cc7503d0-7742-479f-94f2-d2fbffd48809"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.689586 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "cc7503d0-7742-479f-94f2-d2fbffd48809" (UID: "cc7503d0-7742-479f-94f2-d2fbffd48809"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.703223 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-inventory" (OuterVolumeSpecName: "inventory") pod "cc7503d0-7742-479f-94f2-d2fbffd48809" (UID: "cc7503d0-7742-479f-94f2-d2fbffd48809"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.755207 4687 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.755264 4687 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.755287 4687 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.755302 4687 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.755319 4687 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.755330 4687 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc7503d0-7742-479f-94f2-d2fbffd48809-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:20 crc kubenswrapper[4687]: I1125 09:50:20.755341 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q9lwk\" (UniqueName: \"kubernetes.io/projected/cc7503d0-7742-479f-94f2-d2fbffd48809-kube-api-access-q9lwk\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:21 crc kubenswrapper[4687]: I1125 09:50:21.178201 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" event={"ID":"cc7503d0-7742-479f-94f2-d2fbffd48809","Type":"ContainerDied","Data":"8d5bc35dfd3c36b1ef8c65965272842b363fcf60d77fc3de7e2abca8ea38ce84"} Nov 25 09:50:21 crc kubenswrapper[4687]: I1125 09:50:21.178578 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8d5bc35dfd3c36b1ef8c65965272842b363fcf60d77fc3de7e2abca8ea38ce84" Nov 25 09:50:21 crc kubenswrapper[4687]: I1125 09:50:21.178272 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq" Nov 25 09:50:26 crc kubenswrapper[4687]: I1125 09:50:26.642202 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pfbd9"] Nov 25 09:50:26 crc kubenswrapper[4687]: E1125 09:50:26.643233 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc7503d0-7742-479f-94f2-d2fbffd48809" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 25 09:50:26 crc kubenswrapper[4687]: I1125 09:50:26.643251 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc7503d0-7742-479f-94f2-d2fbffd48809" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 25 09:50:26 crc kubenswrapper[4687]: I1125 09:50:26.643480 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc7503d0-7742-479f-94f2-d2fbffd48809" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 25 09:50:26 crc kubenswrapper[4687]: I1125 09:50:26.646609 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pfbd9" Nov 25 09:50:26 crc kubenswrapper[4687]: I1125 09:50:26.658412 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pfbd9"] Nov 25 09:50:26 crc kubenswrapper[4687]: I1125 09:50:26.776253 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/430b5b1f-5485-4bd0-b597-8bb396fd04bd-utilities\") pod \"redhat-marketplace-pfbd9\" (UID: \"430b5b1f-5485-4bd0-b597-8bb396fd04bd\") " pod="openshift-marketplace/redhat-marketplace-pfbd9" Nov 25 09:50:26 crc kubenswrapper[4687]: I1125 09:50:26.776731 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/430b5b1f-5485-4bd0-b597-8bb396fd04bd-catalog-content\") pod \"redhat-marketplace-pfbd9\" (UID: \"430b5b1f-5485-4bd0-b597-8bb396fd04bd\") " pod="openshift-marketplace/redhat-marketplace-pfbd9" Nov 25 09:50:26 crc kubenswrapper[4687]: I1125 09:50:26.776869 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvzq2\" (UniqueName: \"kubernetes.io/projected/430b5b1f-5485-4bd0-b597-8bb396fd04bd-kube-api-access-gvzq2\") pod \"redhat-marketplace-pfbd9\" (UID: \"430b5b1f-5485-4bd0-b597-8bb396fd04bd\") " pod="openshift-marketplace/redhat-marketplace-pfbd9" Nov 25 09:50:26 crc kubenswrapper[4687]: I1125 09:50:26.878258 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/430b5b1f-5485-4bd0-b597-8bb396fd04bd-catalog-content\") pod \"redhat-marketplace-pfbd9\" (UID: \"430b5b1f-5485-4bd0-b597-8bb396fd04bd\") " pod="openshift-marketplace/redhat-marketplace-pfbd9" Nov 25 09:50:26 crc kubenswrapper[4687]: I1125 09:50:26.878431 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvzq2\" (UniqueName: \"kubernetes.io/projected/430b5b1f-5485-4bd0-b597-8bb396fd04bd-kube-api-access-gvzq2\") pod \"redhat-marketplace-pfbd9\" (UID: \"430b5b1f-5485-4bd0-b597-8bb396fd04bd\") " pod="openshift-marketplace/redhat-marketplace-pfbd9" Nov 25 09:50:26 crc kubenswrapper[4687]: I1125 09:50:26.878454 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/430b5b1f-5485-4bd0-b597-8bb396fd04bd-utilities\") pod \"redhat-marketplace-pfbd9\" (UID: \"430b5b1f-5485-4bd0-b597-8bb396fd04bd\") " pod="openshift-marketplace/redhat-marketplace-pfbd9" Nov 25 09:50:26 crc kubenswrapper[4687]: I1125 09:50:26.878847 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/430b5b1f-5485-4bd0-b597-8bb396fd04bd-catalog-content\") pod \"redhat-marketplace-pfbd9\" (UID: \"430b5b1f-5485-4bd0-b597-8bb396fd04bd\") " pod="openshift-marketplace/redhat-marketplace-pfbd9" Nov 25 09:50:26 crc kubenswrapper[4687]: I1125 09:50:26.879163 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/430b5b1f-5485-4bd0-b597-8bb396fd04bd-utilities\") pod \"redhat-marketplace-pfbd9\" (UID: \"430b5b1f-5485-4bd0-b597-8bb396fd04bd\") " pod="openshift-marketplace/redhat-marketplace-pfbd9" Nov 25 09:50:26 crc kubenswrapper[4687]: I1125 09:50:26.900813 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gvzq2\" (UniqueName: \"kubernetes.io/projected/430b5b1f-5485-4bd0-b597-8bb396fd04bd-kube-api-access-gvzq2\") pod \"redhat-marketplace-pfbd9\" (UID: \"430b5b1f-5485-4bd0-b597-8bb396fd04bd\") " pod="openshift-marketplace/redhat-marketplace-pfbd9" Nov 25 09:50:27 crc kubenswrapper[4687]: I1125 09:50:27.042410 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pfbd9" Nov 25 09:50:27 crc kubenswrapper[4687]: I1125 09:50:27.514337 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pfbd9"] Nov 25 09:50:28 crc kubenswrapper[4687]: I1125 09:50:28.268108 4687 generic.go:334] "Generic (PLEG): container finished" podID="430b5b1f-5485-4bd0-b597-8bb396fd04bd" containerID="acd511e4263492e824357ec29784286a960175685799a94dc2012cf25a4d3855" exitCode=0 Nov 25 09:50:28 crc kubenswrapper[4687]: I1125 09:50:28.268195 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pfbd9" event={"ID":"430b5b1f-5485-4bd0-b597-8bb396fd04bd","Type":"ContainerDied","Data":"acd511e4263492e824357ec29784286a960175685799a94dc2012cf25a4d3855"} Nov 25 09:50:28 crc kubenswrapper[4687]: I1125 09:50:28.268406 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pfbd9" event={"ID":"430b5b1f-5485-4bd0-b597-8bb396fd04bd","Type":"ContainerStarted","Data":"38e84d1e07273d66749e65d8f9c63af4b8485d315321b95ee53a4bcccb032d38"} Nov 25 09:50:28 crc kubenswrapper[4687]: I1125 09:50:28.269979 4687 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:50:30 crc kubenswrapper[4687]: I1125 09:50:30.288463 4687 generic.go:334] "Generic (PLEG): container finished" podID="430b5b1f-5485-4bd0-b597-8bb396fd04bd" containerID="05ad750463a125a3fe6a756ddfb75160842761cb55c1cc80640f999981a0e769" exitCode=0 Nov 25 09:50:30 crc kubenswrapper[4687]: I1125 09:50:30.288554 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pfbd9" event={"ID":"430b5b1f-5485-4bd0-b597-8bb396fd04bd","Type":"ContainerDied","Data":"05ad750463a125a3fe6a756ddfb75160842761cb55c1cc80640f999981a0e769"} Nov 25 09:50:31 crc kubenswrapper[4687]: I1125 09:50:31.298604 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pfbd9" 
event={"ID":"430b5b1f-5485-4bd0-b597-8bb396fd04bd","Type":"ContainerStarted","Data":"08e1d35ab91ae8b90c7f90c6ed81bdc6a688505d36ad07cbc3f1f1bcefbec0e5"} Nov 25 09:50:31 crc kubenswrapper[4687]: I1125 09:50:31.322358 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pfbd9" podStartSLOduration=2.870564108 podStartE2EDuration="5.322334458s" podCreationTimestamp="2025-11-25 09:50:26 +0000 UTC" firstStartedPulling="2025-11-25 09:50:28.269695597 +0000 UTC m=+2823.323335315" lastFinishedPulling="2025-11-25 09:50:30.721465947 +0000 UTC m=+2825.775105665" observedRunningTime="2025-11-25 09:50:31.317570483 +0000 UTC m=+2826.371210221" watchObservedRunningTime="2025-11-25 09:50:31.322334458 +0000 UTC m=+2826.375974176" Nov 25 09:50:37 crc kubenswrapper[4687]: I1125 09:50:37.044518 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pfbd9" Nov 25 09:50:37 crc kubenswrapper[4687]: I1125 09:50:37.045077 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pfbd9" Nov 25 09:50:37 crc kubenswrapper[4687]: I1125 09:50:37.097075 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pfbd9" Nov 25 09:50:37 crc kubenswrapper[4687]: I1125 09:50:37.406067 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pfbd9" Nov 25 09:50:38 crc kubenswrapper[4687]: I1125 09:50:38.634919 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qs8kk"] Nov 25 09:50:38 crc kubenswrapper[4687]: I1125 09:50:38.639038 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qs8kk" Nov 25 09:50:38 crc kubenswrapper[4687]: I1125 09:50:38.652978 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qs8kk"] Nov 25 09:50:38 crc kubenswrapper[4687]: I1125 09:50:38.804137 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07dc6774-5763-4b72-b9ba-5b94fa3bc081-utilities\") pod \"certified-operators-qs8kk\" (UID: \"07dc6774-5763-4b72-b9ba-5b94fa3bc081\") " pod="openshift-marketplace/certified-operators-qs8kk" Nov 25 09:50:38 crc kubenswrapper[4687]: I1125 09:50:38.804235 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtr7r\" (UniqueName: \"kubernetes.io/projected/07dc6774-5763-4b72-b9ba-5b94fa3bc081-kube-api-access-qtr7r\") pod \"certified-operators-qs8kk\" (UID: \"07dc6774-5763-4b72-b9ba-5b94fa3bc081\") " pod="openshift-marketplace/certified-operators-qs8kk" Nov 25 09:50:38 crc kubenswrapper[4687]: I1125 09:50:38.804267 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07dc6774-5763-4b72-b9ba-5b94fa3bc081-catalog-content\") pod \"certified-operators-qs8kk\" (UID: \"07dc6774-5763-4b72-b9ba-5b94fa3bc081\") " pod="openshift-marketplace/certified-operators-qs8kk" Nov 25 09:50:38 crc kubenswrapper[4687]: I1125 09:50:38.906525 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07dc6774-5763-4b72-b9ba-5b94fa3bc081-catalog-content\") pod \"certified-operators-qs8kk\" (UID: \"07dc6774-5763-4b72-b9ba-5b94fa3bc081\") " pod="openshift-marketplace/certified-operators-qs8kk" Nov 25 09:50:38 crc kubenswrapper[4687]: I1125 09:50:38.906722 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07dc6774-5763-4b72-b9ba-5b94fa3bc081-utilities\") pod \"certified-operators-qs8kk\" (UID: \"07dc6774-5763-4b72-b9ba-5b94fa3bc081\") " pod="openshift-marketplace/certified-operators-qs8kk" Nov 25 09:50:38 crc kubenswrapper[4687]: I1125 09:50:38.906798 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtr7r\" (UniqueName: \"kubernetes.io/projected/07dc6774-5763-4b72-b9ba-5b94fa3bc081-kube-api-access-qtr7r\") pod \"certified-operators-qs8kk\" (UID: \"07dc6774-5763-4b72-b9ba-5b94fa3bc081\") " pod="openshift-marketplace/certified-operators-qs8kk" Nov 25 09:50:38 crc kubenswrapper[4687]: I1125 09:50:38.907077 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07dc6774-5763-4b72-b9ba-5b94fa3bc081-catalog-content\") pod \"certified-operators-qs8kk\" (UID: \"07dc6774-5763-4b72-b9ba-5b94fa3bc081\") " pod="openshift-marketplace/certified-operators-qs8kk" Nov 25 09:50:38 crc kubenswrapper[4687]: I1125 09:50:38.907287 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07dc6774-5763-4b72-b9ba-5b94fa3bc081-utilities\") pod \"certified-operators-qs8kk\" (UID: \"07dc6774-5763-4b72-b9ba-5b94fa3bc081\") " pod="openshift-marketplace/certified-operators-qs8kk" Nov 25 09:50:38 crc kubenswrapper[4687]: I1125 09:50:38.935578 4687 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-qtr7r\" (UniqueName: \"kubernetes.io/projected/07dc6774-5763-4b72-b9ba-5b94fa3bc081-kube-api-access-qtr7r\") pod \"certified-operators-qs8kk\" (UID: \"07dc6774-5763-4b72-b9ba-5b94fa3bc081\") " pod="openshift-marketplace/certified-operators-qs8kk" Nov 25 09:50:38 crc kubenswrapper[4687]: I1125 09:50:38.962899 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qs8kk" Nov 25 09:50:39 crc kubenswrapper[4687]: I1125 09:50:39.530336 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qs8kk"] Nov 25 09:50:39 crc kubenswrapper[4687]: W1125 09:50:39.542187 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod07dc6774_5763_4b72_b9ba_5b94fa3bc081.slice/crio-9e4e13a8e4c2c8e00d037e574d1a402f0398c3bd190bb4dcabbb94fbccaff0c1 WatchSource:0}: Error finding container 9e4e13a8e4c2c8e00d037e574d1a402f0398c3bd190bb4dcabbb94fbccaff0c1: Status 404 returned error can't find the container with id 9e4e13a8e4c2c8e00d037e574d1a402f0398c3bd190bb4dcabbb94fbccaff0c1 Nov 25 09:50:40 crc kubenswrapper[4687]: I1125 09:50:40.385760 4687 generic.go:334] "Generic (PLEG): container finished" podID="07dc6774-5763-4b72-b9ba-5b94fa3bc081" containerID="e419f05495de8109a68dd3a735fd34aa81cbc889469632b1f7a19c3eac9e6e69" exitCode=0 Nov 25 09:50:40 crc kubenswrapper[4687]: I1125 09:50:40.385888 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qs8kk" event={"ID":"07dc6774-5763-4b72-b9ba-5b94fa3bc081","Type":"ContainerDied","Data":"e419f05495de8109a68dd3a735fd34aa81cbc889469632b1f7a19c3eac9e6e69"} Nov 25 09:50:40 crc kubenswrapper[4687]: I1125 09:50:40.388702 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qs8kk" event={"ID":"07dc6774-5763-4b72-b9ba-5b94fa3bc081","Type":"ContainerStarted","Data":"9e4e13a8e4c2c8e00d037e574d1a402f0398c3bd190bb4dcabbb94fbccaff0c1"} Nov 25 09:50:41 crc kubenswrapper[4687]: I1125 09:50:41.018673 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pfbd9"] Nov 25 09:50:41 crc kubenswrapper[4687]: I1125 09:50:41.018914 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pfbd9" podUID="430b5b1f-5485-4bd0-b597-8bb396fd04bd" containerName="registry-server" containerID="cri-o://08e1d35ab91ae8b90c7f90c6ed81bdc6a688505d36ad07cbc3f1f1bcefbec0e5" gracePeriod=2 Nov 25 09:50:41 crc kubenswrapper[4687]: I1125 09:50:41.401169 4687 generic.go:334] "Generic (PLEG): container finished" podID="430b5b1f-5485-4bd0-b597-8bb396fd04bd" containerID="08e1d35ab91ae8b90c7f90c6ed81bdc6a688505d36ad07cbc3f1f1bcefbec0e5" exitCode=0 Nov 25 09:50:41 crc kubenswrapper[4687]: I1125 09:50:41.401448 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pfbd9" event={"ID":"430b5b1f-5485-4bd0-b597-8bb396fd04bd","Type":"ContainerDied","Data":"08e1d35ab91ae8b90c7f90c6ed81bdc6a688505d36ad07cbc3f1f1bcefbec0e5"} Nov 25 09:50:42 crc kubenswrapper[4687]: I1125 09:50:42.018331 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pfbd9" Nov 25 09:50:42 crc kubenswrapper[4687]: I1125 09:50:42.170467 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gvzq2\" (UniqueName: \"kubernetes.io/projected/430b5b1f-5485-4bd0-b597-8bb396fd04bd-kube-api-access-gvzq2\") pod \"430b5b1f-5485-4bd0-b597-8bb396fd04bd\" (UID: \"430b5b1f-5485-4bd0-b597-8bb396fd04bd\") " Nov 25 09:50:42 crc kubenswrapper[4687]: I1125 09:50:42.170668 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/430b5b1f-5485-4bd0-b597-8bb396fd04bd-utilities\") pod \"430b5b1f-5485-4bd0-b597-8bb396fd04bd\" (UID: \"430b5b1f-5485-4bd0-b597-8bb396fd04bd\") " Nov 25 09:50:42 crc kubenswrapper[4687]: I1125 09:50:42.170718 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/430b5b1f-5485-4bd0-b597-8bb396fd04bd-catalog-content\") pod \"430b5b1f-5485-4bd0-b597-8bb396fd04bd\" (UID: \"430b5b1f-5485-4bd0-b597-8bb396fd04bd\") " Nov 25 09:50:42 crc kubenswrapper[4687]: I1125 09:50:42.171664 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/430b5b1f-5485-4bd0-b597-8bb396fd04bd-utilities" (OuterVolumeSpecName: "utilities") pod "430b5b1f-5485-4bd0-b597-8bb396fd04bd" (UID: "430b5b1f-5485-4bd0-b597-8bb396fd04bd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:50:42 crc kubenswrapper[4687]: I1125 09:50:42.176607 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/430b5b1f-5485-4bd0-b597-8bb396fd04bd-kube-api-access-gvzq2" (OuterVolumeSpecName: "kube-api-access-gvzq2") pod "430b5b1f-5485-4bd0-b597-8bb396fd04bd" (UID: "430b5b1f-5485-4bd0-b597-8bb396fd04bd"). InnerVolumeSpecName "kube-api-access-gvzq2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:50:42 crc kubenswrapper[4687]: I1125 09:50:42.189180 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/430b5b1f-5485-4bd0-b597-8bb396fd04bd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "430b5b1f-5485-4bd0-b597-8bb396fd04bd" (UID: "430b5b1f-5485-4bd0-b597-8bb396fd04bd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:50:42 crc kubenswrapper[4687]: I1125 09:50:42.272699 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gvzq2\" (UniqueName: \"kubernetes.io/projected/430b5b1f-5485-4bd0-b597-8bb396fd04bd-kube-api-access-gvzq2\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:42 crc kubenswrapper[4687]: I1125 09:50:42.272754 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/430b5b1f-5485-4bd0-b597-8bb396fd04bd-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:42 crc kubenswrapper[4687]: I1125 09:50:42.272764 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/430b5b1f-5485-4bd0-b597-8bb396fd04bd-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:42 crc kubenswrapper[4687]: I1125 09:50:42.410682 4687 generic.go:334] "Generic (PLEG): container finished" podID="07dc6774-5763-4b72-b9ba-5b94fa3bc081" containerID="078d094b6cac22b3c079d74ab4513a3f396895de954e3c91e7d1164f45cc5366" exitCode=0 Nov 25 09:50:42 crc kubenswrapper[4687]: I1125 09:50:42.410745 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qs8kk" event={"ID":"07dc6774-5763-4b72-b9ba-5b94fa3bc081","Type":"ContainerDied","Data":"078d094b6cac22b3c079d74ab4513a3f396895de954e3c91e7d1164f45cc5366"} Nov 25 09:50:42 crc kubenswrapper[4687]: I1125 09:50:42.414440 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pfbd9" event={"ID":"430b5b1f-5485-4bd0-b597-8bb396fd04bd","Type":"ContainerDied","Data":"38e84d1e07273d66749e65d8f9c63af4b8485d315321b95ee53a4bcccb032d38"} Nov 25 09:50:42 crc kubenswrapper[4687]: I1125 09:50:42.414497 4687 scope.go:117] "RemoveContainer" containerID="08e1d35ab91ae8b90c7f90c6ed81bdc6a688505d36ad07cbc3f1f1bcefbec0e5" Nov 25 09:50:42 crc kubenswrapper[4687]: I1125 09:50:42.414676 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pfbd9" Nov 25 09:50:42 crc kubenswrapper[4687]: I1125 09:50:42.438440 4687 scope.go:117] "RemoveContainer" containerID="05ad750463a125a3fe6a756ddfb75160842761cb55c1cc80640f999981a0e769" Nov 25 09:50:42 crc kubenswrapper[4687]: I1125 09:50:42.452863 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pfbd9"] Nov 25 09:50:42 crc kubenswrapper[4687]: I1125 09:50:42.460852 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pfbd9"] Nov 25 09:50:42 crc kubenswrapper[4687]: I1125 09:50:42.474418 4687 scope.go:117] "RemoveContainer" containerID="acd511e4263492e824357ec29784286a960175685799a94dc2012cf25a4d3855" Nov 25 09:50:43 crc kubenswrapper[4687]: I1125 09:50:43.436357 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qs8kk" event={"ID":"07dc6774-5763-4b72-b9ba-5b94fa3bc081","Type":"ContainerStarted","Data":"f82d6c8b93697f85aa6589ecd9d1d39d85fd496fc63963fdecce38d23b402379"} Nov 25 09:50:43 crc kubenswrapper[4687]: I1125 09:50:43.464329 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qs8kk" podStartSLOduration=2.739480696 podStartE2EDuration="5.464307654s" podCreationTimestamp="2025-11-25 09:50:38 +0000 UTC" firstStartedPulling="2025-11-25 09:50:40.388253776 +0000 UTC m=+2835.441893494" lastFinishedPulling="2025-11-25 09:50:43.113080734 +0000 UTC m=+2838.166720452" observedRunningTime="2025-11-25 09:50:43.461165512 +0000 UTC m=+2838.514805230" watchObservedRunningTime="2025-11-25 09:50:43.464307654 +0000 UTC m=+2838.517947382" Nov 25 09:50:43 crc kubenswrapper[4687]: I1125 09:50:43.746242 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="430b5b1f-5485-4bd0-b597-8bb396fd04bd" path="/var/lib/kubelet/pods/430b5b1f-5485-4bd0-b597-8bb396fd04bd/volumes" Nov 25 09:50:48 crc kubenswrapper[4687]: I1125 09:50:48.963674 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qs8kk" Nov 25 09:50:48 crc kubenswrapper[4687]: I1125 09:50:48.964225 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qs8kk" Nov 25 09:50:49 crc kubenswrapper[4687]: I1125 09:50:49.020982 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qs8kk" Nov 25 09:50:49 crc kubenswrapper[4687]: I1125 09:50:49.552575 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qs8kk" Nov 25 09:50:49 crc kubenswrapper[4687]: I1125 09:50:49.616593 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qs8kk"] Nov 25 09:50:51 crc kubenswrapper[4687]: I1125 09:50:51.517740 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qs8kk" podUID="07dc6774-5763-4b72-b9ba-5b94fa3bc081" containerName="registry-server" containerID="cri-o://f82d6c8b93697f85aa6589ecd9d1d39d85fd496fc63963fdecce38d23b402379" gracePeriod=2 Nov 25 09:50:51 crc kubenswrapper[4687]: I1125 09:50:51.961279 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qs8kk" Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.080621 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07dc6774-5763-4b72-b9ba-5b94fa3bc081-catalog-content\") pod \"07dc6774-5763-4b72-b9ba-5b94fa3bc081\" (UID: \"07dc6774-5763-4b72-b9ba-5b94fa3bc081\") " Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.080684 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07dc6774-5763-4b72-b9ba-5b94fa3bc081-utilities\") pod \"07dc6774-5763-4b72-b9ba-5b94fa3bc081\" (UID: \"07dc6774-5763-4b72-b9ba-5b94fa3bc081\") " Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.080873 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qtr7r\" (UniqueName: \"kubernetes.io/projected/07dc6774-5763-4b72-b9ba-5b94fa3bc081-kube-api-access-qtr7r\") pod \"07dc6774-5763-4b72-b9ba-5b94fa3bc081\" (UID: \"07dc6774-5763-4b72-b9ba-5b94fa3bc081\") " Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.081961 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07dc6774-5763-4b72-b9ba-5b94fa3bc081-utilities" (OuterVolumeSpecName: "utilities") pod "07dc6774-5763-4b72-b9ba-5b94fa3bc081" (UID: "07dc6774-5763-4b72-b9ba-5b94fa3bc081"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.086680 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07dc6774-5763-4b72-b9ba-5b94fa3bc081-kube-api-access-qtr7r" (OuterVolumeSpecName: "kube-api-access-qtr7r") pod "07dc6774-5763-4b72-b9ba-5b94fa3bc081" (UID: "07dc6774-5763-4b72-b9ba-5b94fa3bc081"). InnerVolumeSpecName "kube-api-access-qtr7r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.143933 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07dc6774-5763-4b72-b9ba-5b94fa3bc081-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "07dc6774-5763-4b72-b9ba-5b94fa3bc081" (UID: "07dc6774-5763-4b72-b9ba-5b94fa3bc081"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.182976 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07dc6774-5763-4b72-b9ba-5b94fa3bc081-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.183012 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07dc6774-5763-4b72-b9ba-5b94fa3bc081-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.183023 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qtr7r\" (UniqueName: \"kubernetes.io/projected/07dc6774-5763-4b72-b9ba-5b94fa3bc081-kube-api-access-qtr7r\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.537949 4687 generic.go:334] "Generic (PLEG): container finished" podID="07dc6774-5763-4b72-b9ba-5b94fa3bc081" containerID="f82d6c8b93697f85aa6589ecd9d1d39d85fd496fc63963fdecce38d23b402379" exitCode=0 Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.537995 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qs8kk" event={"ID":"07dc6774-5763-4b72-b9ba-5b94fa3bc081","Type":"ContainerDied","Data":"f82d6c8b93697f85aa6589ecd9d1d39d85fd496fc63963fdecce38d23b402379"} Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.538027 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qs8kk" event={"ID":"07dc6774-5763-4b72-b9ba-5b94fa3bc081","Type":"ContainerDied","Data":"9e4e13a8e4c2c8e00d037e574d1a402f0398c3bd190bb4dcabbb94fbccaff0c1"} Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.538048 4687 scope.go:117] "RemoveContainer" containerID="f82d6c8b93697f85aa6589ecd9d1d39d85fd496fc63963fdecce38d23b402379" Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.538192 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qs8kk" Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.568566 4687 scope.go:117] "RemoveContainer" containerID="078d094b6cac22b3c079d74ab4513a3f396895de954e3c91e7d1164f45cc5366" Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.574477 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qs8kk"] Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.586441 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qs8kk"] Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.597901 4687 scope.go:117] "RemoveContainer" containerID="e419f05495de8109a68dd3a735fd34aa81cbc889469632b1f7a19c3eac9e6e69" Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.645493 4687 scope.go:117] "RemoveContainer" containerID="f82d6c8b93697f85aa6589ecd9d1d39d85fd496fc63963fdecce38d23b402379" Nov 25 09:50:52 crc kubenswrapper[4687]: E1125 09:50:52.648146 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f82d6c8b93697f85aa6589ecd9d1d39d85fd496fc63963fdecce38d23b402379\": container with ID starting with f82d6c8b93697f85aa6589ecd9d1d39d85fd496fc63963fdecce38d23b402379 not found: ID does not exist" containerID="f82d6c8b93697f85aa6589ecd9d1d39d85fd496fc63963fdecce38d23b402379" Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.648184 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f82d6c8b93697f85aa6589ecd9d1d39d85fd496fc63963fdecce38d23b402379"} err="failed to get container status \"f82d6c8b93697f85aa6589ecd9d1d39d85fd496fc63963fdecce38d23b402379\": rpc error: code = NotFound desc = could not find container \"f82d6c8b93697f85aa6589ecd9d1d39d85fd496fc63963fdecce38d23b402379\": container with ID starting with f82d6c8b93697f85aa6589ecd9d1d39d85fd496fc63963fdecce38d23b402379 not found: ID does not exist" Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.648212 4687 scope.go:117] "RemoveContainer" containerID="078d094b6cac22b3c079d74ab4513a3f396895de954e3c91e7d1164f45cc5366" Nov 25 09:50:52 crc kubenswrapper[4687]: E1125 09:50:52.648832 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"078d094b6cac22b3c079d74ab4513a3f396895de954e3c91e7d1164f45cc5366\": container with ID starting with 078d094b6cac22b3c079d74ab4513a3f396895de954e3c91e7d1164f45cc5366 not found: ID does not exist" containerID="078d094b6cac22b3c079d74ab4513a3f396895de954e3c91e7d1164f45cc5366" Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.648874 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"078d094b6cac22b3c079d74ab4513a3f396895de954e3c91e7d1164f45cc5366"} err="failed to get container status \"078d094b6cac22b3c079d74ab4513a3f396895de954e3c91e7d1164f45cc5366\": rpc error: code = NotFound desc = could not find container \"078d094b6cac22b3c079d74ab4513a3f396895de954e3c91e7d1164f45cc5366\": container with ID starting with 078d094b6cac22b3c079d74ab4513a3f396895de954e3c91e7d1164f45cc5366 not found: ID does not exist" Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.648906 4687 scope.go:117] "RemoveContainer" containerID="e419f05495de8109a68dd3a735fd34aa81cbc889469632b1f7a19c3eac9e6e69" Nov 25 09:50:52 crc kubenswrapper[4687]: E1125 09:50:52.649310 4687 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"e419f05495de8109a68dd3a735fd34aa81cbc889469632b1f7a19c3eac9e6e69\": container with ID starting with e419f05495de8109a68dd3a735fd34aa81cbc889469632b1f7a19c3eac9e6e69 not found: ID does not exist" containerID="e419f05495de8109a68dd3a735fd34aa81cbc889469632b1f7a19c3eac9e6e69" Nov 25 09:50:52 crc kubenswrapper[4687]: I1125 09:50:52.649330 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e419f05495de8109a68dd3a735fd34aa81cbc889469632b1f7a19c3eac9e6e69"} err="failed to get container status \"e419f05495de8109a68dd3a735fd34aa81cbc889469632b1f7a19c3eac9e6e69\": rpc error: code = NotFound desc = could not find container \"e419f05495de8109a68dd3a735fd34aa81cbc889469632b1f7a19c3eac9e6e69\": container with ID starting with e419f05495de8109a68dd3a735fd34aa81cbc889469632b1f7a19c3eac9e6e69 not found: ID does not exist" Nov 25 09:50:53 crc kubenswrapper[4687]: I1125 09:50:53.745793 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07dc6774-5763-4b72-b9ba-5b94fa3bc081" path="/var/lib/kubelet/pods/07dc6774-5763-4b72-b9ba-5b94fa3bc081/volumes" Nov 25 09:50:53 crc kubenswrapper[4687]: I1125 09:50:53.845029 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:50:53 crc kubenswrapper[4687]: I1125 09:50:53.845340 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.864170 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Nov 25 09:51:21 crc kubenswrapper[4687]: E1125 09:51:21.865534 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07dc6774-5763-4b72-b9ba-5b94fa3bc081" containerName="extract-content" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.865551 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="07dc6774-5763-4b72-b9ba-5b94fa3bc081" containerName="extract-content" Nov 25 09:51:21 crc kubenswrapper[4687]: E1125 09:51:21.865586 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="430b5b1f-5485-4bd0-b597-8bb396fd04bd" containerName="extract-content" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.865594 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="430b5b1f-5485-4bd0-b597-8bb396fd04bd" containerName="extract-content" Nov 25 09:51:21 crc kubenswrapper[4687]: E1125 09:51:21.865611 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07dc6774-5763-4b72-b9ba-5b94fa3bc081" containerName="extract-utilities" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.865619 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="07dc6774-5763-4b72-b9ba-5b94fa3bc081" containerName="extract-utilities" Nov 25 09:51:21 crc kubenswrapper[4687]: E1125 09:51:21.865640 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07dc6774-5763-4b72-b9ba-5b94fa3bc081" containerName="registry-server" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.865648 
4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="07dc6774-5763-4b72-b9ba-5b94fa3bc081" containerName="registry-server" Nov 25 09:51:21 crc kubenswrapper[4687]: E1125 09:51:21.865661 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="430b5b1f-5485-4bd0-b597-8bb396fd04bd" containerName="extract-utilities" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.865668 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="430b5b1f-5485-4bd0-b597-8bb396fd04bd" containerName="extract-utilities" Nov 25 09:51:21 crc kubenswrapper[4687]: E1125 09:51:21.865681 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="430b5b1f-5485-4bd0-b597-8bb396fd04bd" containerName="registry-server" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.865690 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="430b5b1f-5485-4bd0-b597-8bb396fd04bd" containerName="registry-server" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.865912 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="07dc6774-5763-4b72-b9ba-5b94fa3bc081" containerName="registry-server" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.865926 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="430b5b1f-5485-4bd0-b597-8bb396fd04bd" containerName="registry-server" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.866637 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.869429 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.869429 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.869581 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.869643 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-w2rpv" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.888714 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.938265 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f60c7882-f90a-4cfd-93a4-1cf51c29315a-config-data\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.938336 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/f60c7882-f90a-4cfd-93a4-1cf51c29315a-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.938397 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:21 crc 
kubenswrapper[4687]: I1125 09:51:21.938431 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f60c7882-f90a-4cfd-93a4-1cf51c29315a-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.938478 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f60c7882-f90a-4cfd-93a4-1cf51c29315a-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.938527 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jnkg9\" (UniqueName: \"kubernetes.io/projected/f60c7882-f90a-4cfd-93a4-1cf51c29315a-kube-api-access-jnkg9\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.938564 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/f60c7882-f90a-4cfd-93a4-1cf51c29315a-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.938595 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f60c7882-f90a-4cfd-93a4-1cf51c29315a-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:21 crc kubenswrapper[4687]: I1125 09:51:21.938615 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/f60c7882-f90a-4cfd-93a4-1cf51c29315a-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:22 crc kubenswrapper[4687]: I1125 09:51:22.040834 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:22 crc kubenswrapper[4687]: I1125 09:51:22.040902 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f60c7882-f90a-4cfd-93a4-1cf51c29315a-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:22 crc kubenswrapper[4687]: I1125 09:51:22.040959 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f60c7882-f90a-4cfd-93a4-1cf51c29315a-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:22 crc 
kubenswrapper[4687]: I1125 09:51:22.040979 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jnkg9\" (UniqueName: \"kubernetes.io/projected/f60c7882-f90a-4cfd-93a4-1cf51c29315a-kube-api-access-jnkg9\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:22 crc kubenswrapper[4687]: I1125 09:51:22.040995 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/f60c7882-f90a-4cfd-93a4-1cf51c29315a-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:22 crc kubenswrapper[4687]: I1125 09:51:22.041012 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/f60c7882-f90a-4cfd-93a4-1cf51c29315a-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:22 crc kubenswrapper[4687]: I1125 09:51:22.041029 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f60c7882-f90a-4cfd-93a4-1cf51c29315a-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:22 crc kubenswrapper[4687]: I1125 09:51:22.041059 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f60c7882-f90a-4cfd-93a4-1cf51c29315a-config-data\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:22 crc kubenswrapper[4687]: I1125 09:51:22.041094 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/f60c7882-f90a-4cfd-93a4-1cf51c29315a-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:22 crc kubenswrapper[4687]: I1125 09:51:22.042132 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/f60c7882-f90a-4cfd-93a4-1cf51c29315a-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:22 crc kubenswrapper[4687]: I1125 09:51:22.042187 4687 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/tempest-tests-tempest" Nov 25 09:51:22 crc kubenswrapper[4687]: I1125 09:51:22.042814 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f60c7882-f90a-4cfd-93a4-1cf51c29315a-config-data\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:22 crc kubenswrapper[4687]: I1125 09:51:22.043065 4687 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/f60c7882-f90a-4cfd-93a4-1cf51c29315a-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:22 crc kubenswrapper[4687]: I1125 09:51:22.044128 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f60c7882-f90a-4cfd-93a4-1cf51c29315a-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:22 crc kubenswrapper[4687]: I1125 09:51:22.048635 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/f60c7882-f90a-4cfd-93a4-1cf51c29315a-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:22 crc kubenswrapper[4687]: I1125 09:51:22.049291 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f60c7882-f90a-4cfd-93a4-1cf51c29315a-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:22 crc kubenswrapper[4687]: I1125 09:51:22.049845 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f60c7882-f90a-4cfd-93a4-1cf51c29315a-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:22 crc kubenswrapper[4687]: I1125 09:51:22.060584 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jnkg9\" (UniqueName: \"kubernetes.io/projected/f60c7882-f90a-4cfd-93a4-1cf51c29315a-kube-api-access-jnkg9\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:22 crc kubenswrapper[4687]: I1125 09:51:22.071765 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"tempest-tests-tempest\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " pod="openstack/tempest-tests-tempest" Nov 25 09:51:22 crc kubenswrapper[4687]: I1125 09:51:22.193546 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 09:51:22 crc kubenswrapper[4687]: I1125 09:51:22.640042 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 25 09:51:22 crc kubenswrapper[4687]: W1125 09:51:22.645447 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf60c7882_f90a_4cfd_93a4_1cf51c29315a.slice/crio-9df11e75322447f742e9fb589e9d640c7691191af2d000d74667d0911b42704f WatchSource:0}: Error finding container 9df11e75322447f742e9fb589e9d640c7691191af2d000d74667d0911b42704f: Status 404 returned error can't find the container with id 9df11e75322447f742e9fb589e9d640c7691191af2d000d74667d0911b42704f Nov 25 09:51:22 crc kubenswrapper[4687]: I1125 09:51:22.796443 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"f60c7882-f90a-4cfd-93a4-1cf51c29315a","Type":"ContainerStarted","Data":"9df11e75322447f742e9fb589e9d640c7691191af2d000d74667d0911b42704f"} Nov 25 09:51:23 crc kubenswrapper[4687]: I1125 09:51:23.844622 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:51:23 crc kubenswrapper[4687]: I1125 09:51:23.844710 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:51:52 crc kubenswrapper[4687]: E1125 09:51:52.464313 4687 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified" Nov 25 09:51:52 crc kubenswrapper[4687]: E1125 09:51:52.464901 4687 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jnkg9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(f60c7882-f90a-4cfd-93a4-1cf51c29315a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:51:52 crc kubenswrapper[4687]: E1125 09:51:52.466070 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" 
podUID="f60c7882-f90a-4cfd-93a4-1cf51c29315a" Nov 25 09:51:53 crc kubenswrapper[4687]: E1125 09:51:53.151130 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="f60c7882-f90a-4cfd-93a4-1cf51c29315a" Nov 25 09:51:53 crc kubenswrapper[4687]: I1125 09:51:53.844694 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:51:53 crc kubenswrapper[4687]: I1125 09:51:53.844779 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:51:53 crc kubenswrapper[4687]: I1125 09:51:53.844844 4687 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:51:53 crc kubenswrapper[4687]: I1125 09:51:53.845846 4687 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a8a2a78700106a41080a4da283d63c1bfe9b146dc1fcdfe20f9e7b23579d1d20"} pod="openshift-machine-config-operator/machine-config-daemon-vcqct" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:51:53 crc kubenswrapper[4687]: I1125 09:51:53.845929 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" containerID="cri-o://a8a2a78700106a41080a4da283d63c1bfe9b146dc1fcdfe20f9e7b23579d1d20" gracePeriod=600 Nov 25 09:51:54 crc kubenswrapper[4687]: I1125 09:51:54.161104 4687 generic.go:334] "Generic (PLEG): container finished" podID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerID="a8a2a78700106a41080a4da283d63c1bfe9b146dc1fcdfe20f9e7b23579d1d20" exitCode=0 Nov 25 09:51:54 crc kubenswrapper[4687]: I1125 09:51:54.161179 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerDied","Data":"a8a2a78700106a41080a4da283d63c1bfe9b146dc1fcdfe20f9e7b23579d1d20"} Nov 25 09:51:54 crc kubenswrapper[4687]: I1125 09:51:54.161664 4687 scope.go:117] "RemoveContainer" containerID="40cef52cd1121feef3cf1afe94785e3fcf1fb44f13c55a694dfc819ed4a7d04e" Nov 25 09:51:55 crc kubenswrapper[4687]: I1125 09:51:55.176105 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerStarted","Data":"9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625"} Nov 25 09:52:05 crc kubenswrapper[4687]: I1125 09:52:05.149618 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 25 09:52:06 crc 
kubenswrapper[4687]: I1125 09:52:06.274188 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"f60c7882-f90a-4cfd-93a4-1cf51c29315a","Type":"ContainerStarted","Data":"4a5aa9f8af56508ffb87235cdc35f71cc5e581c521b486d0125c19eea9a56024"} Nov 25 09:52:06 crc kubenswrapper[4687]: I1125 09:52:06.298055 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=3.7993278569999998 podStartE2EDuration="46.298030913s" podCreationTimestamp="2025-11-25 09:51:20 +0000 UTC" firstStartedPulling="2025-11-25 09:51:22.647944922 +0000 UTC m=+2877.701584640" lastFinishedPulling="2025-11-25 09:52:05.146647988 +0000 UTC m=+2920.200287696" observedRunningTime="2025-11-25 09:52:06.291556593 +0000 UTC m=+2921.345196321" watchObservedRunningTime="2025-11-25 09:52:06.298030913 +0000 UTC m=+2921.351670631" Nov 25 09:54:23 crc kubenswrapper[4687]: I1125 09:54:23.844889 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:54:23 crc kubenswrapper[4687]: I1125 09:54:23.845678 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:54:53 crc kubenswrapper[4687]: I1125 09:54:53.845232 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:54:53 crc kubenswrapper[4687]: I1125 09:54:53.845878 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:55:23 crc kubenswrapper[4687]: I1125 09:55:23.845466 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:55:23 crc kubenswrapper[4687]: I1125 09:55:23.845921 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:55:23 crc kubenswrapper[4687]: I1125 09:55:23.845971 4687 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 09:55:23 crc kubenswrapper[4687]: I1125 09:55:23.846776 4687 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625"} pod="openshift-machine-config-operator/machine-config-daemon-vcqct" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:55:23 crc kubenswrapper[4687]: I1125 09:55:23.846836 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" containerID="cri-o://9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" gracePeriod=600 Nov 25 09:55:23 crc kubenswrapper[4687]: E1125 09:55:23.968926 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:55:24 crc kubenswrapper[4687]: I1125 09:55:24.187855 4687 generic.go:334] "Generic (PLEG): container finished" podID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" exitCode=0 Nov 25 09:55:24 crc kubenswrapper[4687]: I1125 09:55:24.187913 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerDied","Data":"9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625"} Nov 25 09:55:24 crc kubenswrapper[4687]: I1125 09:55:24.187986 4687 scope.go:117] "RemoveContainer" containerID="a8a2a78700106a41080a4da283d63c1bfe9b146dc1fcdfe20f9e7b23579d1d20" Nov 25 09:55:24 crc kubenswrapper[4687]: I1125 09:55:24.188765 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 09:55:24 crc kubenswrapper[4687]: E1125 09:55:24.189094 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:55:38 crc kubenswrapper[4687]: I1125 09:55:38.736016 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 09:55:38 crc kubenswrapper[4687]: E1125 09:55:38.736941 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:55:52 crc kubenswrapper[4687]: I1125 09:55:52.734916 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 09:55:52 crc kubenswrapper[4687]: E1125 09:55:52.736650 4687 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:56:05 crc kubenswrapper[4687]: I1125 09:56:05.740681 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 09:56:05 crc kubenswrapper[4687]: E1125 09:56:05.741573 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:56:18 crc kubenswrapper[4687]: I1125 09:56:18.303158 4687 patch_prober.go:28] interesting pod/package-server-manager-789f6589d5-ztsnq container/package-server-manager namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"http://10.217.0.20:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 09:56:18 crc kubenswrapper[4687]: I1125 09:56:18.303764 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ztsnq" podUID="b968a427-bae8-41af-a3b7-1ef108cefb0d" containerName="package-server-manager" probeResult="failure" output="Get \"http://10.217.0.20:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 09:56:19 crc kubenswrapper[4687]: I1125 09:56:19.735096 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 09:56:19 crc kubenswrapper[4687]: E1125 09:56:19.735739 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:56:31 crc kubenswrapper[4687]: I1125 09:56:31.735578 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 09:56:31 crc kubenswrapper[4687]: E1125 09:56:31.736481 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:56:44 crc kubenswrapper[4687]: I1125 09:56:44.734962 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 09:56:44 crc kubenswrapper[4687]: E1125 09:56:44.735892 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:56:57 crc kubenswrapper[4687]: I1125 09:56:57.735164 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 09:56:57 crc kubenswrapper[4687]: E1125 09:56:57.736119 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:57:12 crc kubenswrapper[4687]: I1125 09:57:12.735245 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 09:57:12 crc kubenswrapper[4687]: E1125 09:57:12.736054 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:57:26 crc kubenswrapper[4687]: I1125 09:57:26.104702 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-hmqzx"] Nov 25 09:57:26 crc kubenswrapper[4687]: I1125 09:57:26.107922 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hmqzx" Nov 25 09:57:26 crc kubenswrapper[4687]: I1125 09:57:26.113138 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hmqzx"] Nov 25 09:57:26 crc kubenswrapper[4687]: I1125 09:57:26.212686 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/302ea83c-d01c-4ba1-b7ca-214bfa958749-utilities\") pod \"community-operators-hmqzx\" (UID: \"302ea83c-d01c-4ba1-b7ca-214bfa958749\") " pod="openshift-marketplace/community-operators-hmqzx" Nov 25 09:57:26 crc kubenswrapper[4687]: I1125 09:57:26.212970 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gwdk9\" (UniqueName: \"kubernetes.io/projected/302ea83c-d01c-4ba1-b7ca-214bfa958749-kube-api-access-gwdk9\") pod \"community-operators-hmqzx\" (UID: \"302ea83c-d01c-4ba1-b7ca-214bfa958749\") " pod="openshift-marketplace/community-operators-hmqzx" Nov 25 09:57:26 crc kubenswrapper[4687]: I1125 09:57:26.213040 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/302ea83c-d01c-4ba1-b7ca-214bfa958749-catalog-content\") pod \"community-operators-hmqzx\" (UID: \"302ea83c-d01c-4ba1-b7ca-214bfa958749\") " pod="openshift-marketplace/community-operators-hmqzx" Nov 25 09:57:26 crc kubenswrapper[4687]: I1125 09:57:26.314814 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/302ea83c-d01c-4ba1-b7ca-214bfa958749-utilities\") pod \"community-operators-hmqzx\" (UID: \"302ea83c-d01c-4ba1-b7ca-214bfa958749\") " pod="openshift-marketplace/community-operators-hmqzx" Nov 25 09:57:26 crc kubenswrapper[4687]: I1125 09:57:26.314979 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gwdk9\" (UniqueName: \"kubernetes.io/projected/302ea83c-d01c-4ba1-b7ca-214bfa958749-kube-api-access-gwdk9\") pod \"community-operators-hmqzx\" (UID: \"302ea83c-d01c-4ba1-b7ca-214bfa958749\") " pod="openshift-marketplace/community-operators-hmqzx" Nov 25 09:57:26 crc kubenswrapper[4687]: I1125 09:57:26.315016 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/302ea83c-d01c-4ba1-b7ca-214bfa958749-catalog-content\") pod \"community-operators-hmqzx\" (UID: \"302ea83c-d01c-4ba1-b7ca-214bfa958749\") " pod="openshift-marketplace/community-operators-hmqzx" Nov 25 09:57:26 crc kubenswrapper[4687]: I1125 09:57:26.315438 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/302ea83c-d01c-4ba1-b7ca-214bfa958749-utilities\") pod \"community-operators-hmqzx\" (UID: \"302ea83c-d01c-4ba1-b7ca-214bfa958749\") " pod="openshift-marketplace/community-operators-hmqzx" Nov 25 09:57:26 crc kubenswrapper[4687]: I1125 09:57:26.315549 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/302ea83c-d01c-4ba1-b7ca-214bfa958749-catalog-content\") pod \"community-operators-hmqzx\" (UID: \"302ea83c-d01c-4ba1-b7ca-214bfa958749\") " pod="openshift-marketplace/community-operators-hmqzx" Nov 25 09:57:26 crc kubenswrapper[4687]: I1125 09:57:26.336659 4687 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-gwdk9\" (UniqueName: \"kubernetes.io/projected/302ea83c-d01c-4ba1-b7ca-214bfa958749-kube-api-access-gwdk9\") pod \"community-operators-hmqzx\" (UID: \"302ea83c-d01c-4ba1-b7ca-214bfa958749\") " pod="openshift-marketplace/community-operators-hmqzx" Nov 25 09:57:26 crc kubenswrapper[4687]: I1125 09:57:26.435476 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hmqzx" Nov 25 09:57:26 crc kubenswrapper[4687]: I1125 09:57:26.736679 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 09:57:26 crc kubenswrapper[4687]: E1125 09:57:26.736942 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:57:27 crc kubenswrapper[4687]: I1125 09:57:27.014468 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hmqzx"] Nov 25 09:57:27 crc kubenswrapper[4687]: I1125 09:57:27.357756 4687 generic.go:334] "Generic (PLEG): container finished" podID="302ea83c-d01c-4ba1-b7ca-214bfa958749" containerID="dbc95a395ae237555b83c006018ed8a51d903443737224037d20c893b2f933fa" exitCode=0 Nov 25 09:57:27 crc kubenswrapper[4687]: I1125 09:57:27.357832 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hmqzx" event={"ID":"302ea83c-d01c-4ba1-b7ca-214bfa958749","Type":"ContainerDied","Data":"dbc95a395ae237555b83c006018ed8a51d903443737224037d20c893b2f933fa"} Nov 25 09:57:27 crc kubenswrapper[4687]: I1125 09:57:27.358098 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hmqzx" event={"ID":"302ea83c-d01c-4ba1-b7ca-214bfa958749","Type":"ContainerStarted","Data":"8efe68920e96520a8bf62e344e436d0f1b98a114d6f77d242bf0e25fdaf07062"} Nov 25 09:57:27 crc kubenswrapper[4687]: I1125 09:57:27.361549 4687 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:57:29 crc kubenswrapper[4687]: I1125 09:57:29.377746 4687 generic.go:334] "Generic (PLEG): container finished" podID="302ea83c-d01c-4ba1-b7ca-214bfa958749" containerID="f9f653aaafbdb61a48ef01f2c05ef986a9b05bf6b5c6715a31a12ecd82005e99" exitCode=0 Nov 25 09:57:29 crc kubenswrapper[4687]: I1125 09:57:29.377831 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hmqzx" event={"ID":"302ea83c-d01c-4ba1-b7ca-214bfa958749","Type":"ContainerDied","Data":"f9f653aaafbdb61a48ef01f2c05ef986a9b05bf6b5c6715a31a12ecd82005e99"} Nov 25 09:57:30 crc kubenswrapper[4687]: I1125 09:57:30.391710 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hmqzx" event={"ID":"302ea83c-d01c-4ba1-b7ca-214bfa958749","Type":"ContainerStarted","Data":"d7407379baad9fa2d0aaa4283ca4aec8cc245e8b532224a2494386511b67ecc6"} Nov 25 09:57:30 crc kubenswrapper[4687]: I1125 09:57:30.413628 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-hmqzx" podStartSLOduration=1.910623789 
podStartE2EDuration="4.413606829s" podCreationTimestamp="2025-11-25 09:57:26 +0000 UTC" firstStartedPulling="2025-11-25 09:57:27.361091186 +0000 UTC m=+3242.414730904" lastFinishedPulling="2025-11-25 09:57:29.864074226 +0000 UTC m=+3244.917713944" observedRunningTime="2025-11-25 09:57:30.410378161 +0000 UTC m=+3245.464017919" watchObservedRunningTime="2025-11-25 09:57:30.413606829 +0000 UTC m=+3245.467246557" Nov 25 09:57:36 crc kubenswrapper[4687]: I1125 09:57:36.436309 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-hmqzx" Nov 25 09:57:36 crc kubenswrapper[4687]: I1125 09:57:36.436945 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-hmqzx" Nov 25 09:57:36 crc kubenswrapper[4687]: I1125 09:57:36.506519 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-hmqzx" Nov 25 09:57:37 crc kubenswrapper[4687]: I1125 09:57:37.502102 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-hmqzx" Nov 25 09:57:37 crc kubenswrapper[4687]: I1125 09:57:37.550485 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hmqzx"] Nov 25 09:57:37 crc kubenswrapper[4687]: I1125 09:57:37.735883 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 09:57:37 crc kubenswrapper[4687]: E1125 09:57:37.736167 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:57:39 crc kubenswrapper[4687]: I1125 09:57:39.465003 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-hmqzx" podUID="302ea83c-d01c-4ba1-b7ca-214bfa958749" containerName="registry-server" containerID="cri-o://d7407379baad9fa2d0aaa4283ca4aec8cc245e8b532224a2494386511b67ecc6" gracePeriod=2 Nov 25 09:57:41 crc kubenswrapper[4687]: I1125 09:57:41.488705 4687 generic.go:334] "Generic (PLEG): container finished" podID="302ea83c-d01c-4ba1-b7ca-214bfa958749" containerID="d7407379baad9fa2d0aaa4283ca4aec8cc245e8b532224a2494386511b67ecc6" exitCode=0 Nov 25 09:57:41 crc kubenswrapper[4687]: I1125 09:57:41.488913 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hmqzx" event={"ID":"302ea83c-d01c-4ba1-b7ca-214bfa958749","Type":"ContainerDied","Data":"d7407379baad9fa2d0aaa4283ca4aec8cc245e8b532224a2494386511b67ecc6"} Nov 25 09:57:41 crc kubenswrapper[4687]: I1125 09:57:41.635055 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hmqzx" Nov 25 09:57:41 crc kubenswrapper[4687]: I1125 09:57:41.654170 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/302ea83c-d01c-4ba1-b7ca-214bfa958749-utilities\") pod \"302ea83c-d01c-4ba1-b7ca-214bfa958749\" (UID: \"302ea83c-d01c-4ba1-b7ca-214bfa958749\") " Nov 25 09:57:41 crc kubenswrapper[4687]: I1125 09:57:41.654230 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/302ea83c-d01c-4ba1-b7ca-214bfa958749-catalog-content\") pod \"302ea83c-d01c-4ba1-b7ca-214bfa958749\" (UID: \"302ea83c-d01c-4ba1-b7ca-214bfa958749\") " Nov 25 09:57:41 crc kubenswrapper[4687]: I1125 09:57:41.654294 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gwdk9\" (UniqueName: \"kubernetes.io/projected/302ea83c-d01c-4ba1-b7ca-214bfa958749-kube-api-access-gwdk9\") pod \"302ea83c-d01c-4ba1-b7ca-214bfa958749\" (UID: \"302ea83c-d01c-4ba1-b7ca-214bfa958749\") " Nov 25 09:57:41 crc kubenswrapper[4687]: I1125 09:57:41.655440 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/302ea83c-d01c-4ba1-b7ca-214bfa958749-utilities" (OuterVolumeSpecName: "utilities") pod "302ea83c-d01c-4ba1-b7ca-214bfa958749" (UID: "302ea83c-d01c-4ba1-b7ca-214bfa958749"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:57:41 crc kubenswrapper[4687]: I1125 09:57:41.661751 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/302ea83c-d01c-4ba1-b7ca-214bfa958749-kube-api-access-gwdk9" (OuterVolumeSpecName: "kube-api-access-gwdk9") pod "302ea83c-d01c-4ba1-b7ca-214bfa958749" (UID: "302ea83c-d01c-4ba1-b7ca-214bfa958749"). InnerVolumeSpecName "kube-api-access-gwdk9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:57:41 crc kubenswrapper[4687]: I1125 09:57:41.710191 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/302ea83c-d01c-4ba1-b7ca-214bfa958749-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "302ea83c-d01c-4ba1-b7ca-214bfa958749" (UID: "302ea83c-d01c-4ba1-b7ca-214bfa958749"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:57:41 crc kubenswrapper[4687]: I1125 09:57:41.755848 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/302ea83c-d01c-4ba1-b7ca-214bfa958749-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:41 crc kubenswrapper[4687]: I1125 09:57:41.755895 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gwdk9\" (UniqueName: \"kubernetes.io/projected/302ea83c-d01c-4ba1-b7ca-214bfa958749-kube-api-access-gwdk9\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:41 crc kubenswrapper[4687]: I1125 09:57:41.755909 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/302ea83c-d01c-4ba1-b7ca-214bfa958749-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:42 crc kubenswrapper[4687]: I1125 09:57:42.501185 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hmqzx" event={"ID":"302ea83c-d01c-4ba1-b7ca-214bfa958749","Type":"ContainerDied","Data":"8efe68920e96520a8bf62e344e436d0f1b98a114d6f77d242bf0e25fdaf07062"} Nov 25 09:57:42 crc kubenswrapper[4687]: I1125 09:57:42.501556 4687 scope.go:117] "RemoveContainer" containerID="d7407379baad9fa2d0aaa4283ca4aec8cc245e8b532224a2494386511b67ecc6" Nov 25 09:57:42 crc kubenswrapper[4687]: I1125 09:57:42.501288 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hmqzx" Nov 25 09:57:42 crc kubenswrapper[4687]: I1125 09:57:42.536000 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hmqzx"] Nov 25 09:57:42 crc kubenswrapper[4687]: I1125 09:57:42.536612 4687 scope.go:117] "RemoveContainer" containerID="f9f653aaafbdb61a48ef01f2c05ef986a9b05bf6b5c6715a31a12ecd82005e99" Nov 25 09:57:42 crc kubenswrapper[4687]: I1125 09:57:42.544168 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-hmqzx"] Nov 25 09:57:42 crc kubenswrapper[4687]: I1125 09:57:42.560149 4687 scope.go:117] "RemoveContainer" containerID="dbc95a395ae237555b83c006018ed8a51d903443737224037d20c893b2f933fa" Nov 25 09:57:43 crc kubenswrapper[4687]: I1125 09:57:43.746134 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="302ea83c-d01c-4ba1-b7ca-214bfa958749" path="/var/lib/kubelet/pods/302ea83c-d01c-4ba1-b7ca-214bfa958749/volumes" Nov 25 09:57:45 crc kubenswrapper[4687]: I1125 09:57:45.324721 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jcbzc"] Nov 25 09:57:45 crc kubenswrapper[4687]: E1125 09:57:45.325094 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="302ea83c-d01c-4ba1-b7ca-214bfa958749" containerName="registry-server" Nov 25 09:57:45 crc kubenswrapper[4687]: I1125 09:57:45.325108 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="302ea83c-d01c-4ba1-b7ca-214bfa958749" containerName="registry-server" Nov 25 09:57:45 crc kubenswrapper[4687]: E1125 09:57:45.325124 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="302ea83c-d01c-4ba1-b7ca-214bfa958749" containerName="extract-content" Nov 25 09:57:45 crc kubenswrapper[4687]: I1125 09:57:45.325130 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="302ea83c-d01c-4ba1-b7ca-214bfa958749" containerName="extract-content" Nov 25 09:57:45 crc kubenswrapper[4687]: E1125 09:57:45.325143 4687 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="302ea83c-d01c-4ba1-b7ca-214bfa958749" containerName="extract-utilities" Nov 25 09:57:45 crc kubenswrapper[4687]: I1125 09:57:45.325150 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="302ea83c-d01c-4ba1-b7ca-214bfa958749" containerName="extract-utilities" Nov 25 09:57:45 crc kubenswrapper[4687]: I1125 09:57:45.325347 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="302ea83c-d01c-4ba1-b7ca-214bfa958749" containerName="registry-server" Nov 25 09:57:45 crc kubenswrapper[4687]: I1125 09:57:45.327592 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jcbzc" Nov 25 09:57:45 crc kubenswrapper[4687]: I1125 09:57:45.348639 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jcbzc"] Nov 25 09:57:45 crc kubenswrapper[4687]: I1125 09:57:45.532561 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlrjr\" (UniqueName: \"kubernetes.io/projected/cb38e708-1b5b-4279-b52f-2814c1b276b0-kube-api-access-hlrjr\") pod \"redhat-operators-jcbzc\" (UID: \"cb38e708-1b5b-4279-b52f-2814c1b276b0\") " pod="openshift-marketplace/redhat-operators-jcbzc" Nov 25 09:57:45 crc kubenswrapper[4687]: I1125 09:57:45.533273 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb38e708-1b5b-4279-b52f-2814c1b276b0-utilities\") pod \"redhat-operators-jcbzc\" (UID: \"cb38e708-1b5b-4279-b52f-2814c1b276b0\") " pod="openshift-marketplace/redhat-operators-jcbzc" Nov 25 09:57:45 crc kubenswrapper[4687]: I1125 09:57:45.533401 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb38e708-1b5b-4279-b52f-2814c1b276b0-catalog-content\") pod \"redhat-operators-jcbzc\" (UID: \"cb38e708-1b5b-4279-b52f-2814c1b276b0\") " pod="openshift-marketplace/redhat-operators-jcbzc" Nov 25 09:57:45 crc kubenswrapper[4687]: I1125 09:57:45.635007 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb38e708-1b5b-4279-b52f-2814c1b276b0-utilities\") pod \"redhat-operators-jcbzc\" (UID: \"cb38e708-1b5b-4279-b52f-2814c1b276b0\") " pod="openshift-marketplace/redhat-operators-jcbzc" Nov 25 09:57:45 crc kubenswrapper[4687]: I1125 09:57:45.635066 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb38e708-1b5b-4279-b52f-2814c1b276b0-catalog-content\") pod \"redhat-operators-jcbzc\" (UID: \"cb38e708-1b5b-4279-b52f-2814c1b276b0\") " pod="openshift-marketplace/redhat-operators-jcbzc" Nov 25 09:57:45 crc kubenswrapper[4687]: I1125 09:57:45.635250 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlrjr\" (UniqueName: \"kubernetes.io/projected/cb38e708-1b5b-4279-b52f-2814c1b276b0-kube-api-access-hlrjr\") pod \"redhat-operators-jcbzc\" (UID: \"cb38e708-1b5b-4279-b52f-2814c1b276b0\") " pod="openshift-marketplace/redhat-operators-jcbzc" Nov 25 09:57:45 crc kubenswrapper[4687]: I1125 09:57:45.635684 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb38e708-1b5b-4279-b52f-2814c1b276b0-utilities\") pod 
\"redhat-operators-jcbzc\" (UID: \"cb38e708-1b5b-4279-b52f-2814c1b276b0\") " pod="openshift-marketplace/redhat-operators-jcbzc" Nov 25 09:57:45 crc kubenswrapper[4687]: I1125 09:57:45.635708 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb38e708-1b5b-4279-b52f-2814c1b276b0-catalog-content\") pod \"redhat-operators-jcbzc\" (UID: \"cb38e708-1b5b-4279-b52f-2814c1b276b0\") " pod="openshift-marketplace/redhat-operators-jcbzc" Nov 25 09:57:45 crc kubenswrapper[4687]: I1125 09:57:45.657909 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlrjr\" (UniqueName: \"kubernetes.io/projected/cb38e708-1b5b-4279-b52f-2814c1b276b0-kube-api-access-hlrjr\") pod \"redhat-operators-jcbzc\" (UID: \"cb38e708-1b5b-4279-b52f-2814c1b276b0\") " pod="openshift-marketplace/redhat-operators-jcbzc" Nov 25 09:57:45 crc kubenswrapper[4687]: I1125 09:57:45.958683 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jcbzc" Nov 25 09:57:46 crc kubenswrapper[4687]: I1125 09:57:46.453788 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jcbzc"] Nov 25 09:57:46 crc kubenswrapper[4687]: I1125 09:57:46.552857 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jcbzc" event={"ID":"cb38e708-1b5b-4279-b52f-2814c1b276b0","Type":"ContainerStarted","Data":"80ae0115dc49e4ab6dd99506081014a404a082f7ab480ea056d87ad5cda66424"} Nov 25 09:57:47 crc kubenswrapper[4687]: I1125 09:57:47.562542 4687 generic.go:334] "Generic (PLEG): container finished" podID="cb38e708-1b5b-4279-b52f-2814c1b276b0" containerID="0a5d003bdd56d3548db87bac52e1f4c1c66eafed6963b05391b3de1f6be52529" exitCode=0 Nov 25 09:57:47 crc kubenswrapper[4687]: I1125 09:57:47.562597 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jcbzc" event={"ID":"cb38e708-1b5b-4279-b52f-2814c1b276b0","Type":"ContainerDied","Data":"0a5d003bdd56d3548db87bac52e1f4c1c66eafed6963b05391b3de1f6be52529"} Nov 25 09:57:49 crc kubenswrapper[4687]: I1125 09:57:49.581068 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jcbzc" event={"ID":"cb38e708-1b5b-4279-b52f-2814c1b276b0","Type":"ContainerStarted","Data":"e9f57c32b82e5972650398c729e91e2bb8e73259c9c9e570b919ea8380bfd624"} Nov 25 09:57:49 crc kubenswrapper[4687]: I1125 09:57:49.734914 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 09:57:49 crc kubenswrapper[4687]: E1125 09:57:49.735208 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:57:50 crc kubenswrapper[4687]: I1125 09:57:50.594029 4687 generic.go:334] "Generic (PLEG): container finished" podID="cb38e708-1b5b-4279-b52f-2814c1b276b0" containerID="e9f57c32b82e5972650398c729e91e2bb8e73259c9c9e570b919ea8380bfd624" exitCode=0 Nov 25 09:57:50 crc kubenswrapper[4687]: I1125 09:57:50.594105 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-jcbzc" event={"ID":"cb38e708-1b5b-4279-b52f-2814c1b276b0","Type":"ContainerDied","Data":"e9f57c32b82e5972650398c729e91e2bb8e73259c9c9e570b919ea8380bfd624"} Nov 25 09:57:51 crc kubenswrapper[4687]: I1125 09:57:51.608578 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jcbzc" event={"ID":"cb38e708-1b5b-4279-b52f-2814c1b276b0","Type":"ContainerStarted","Data":"28ba8a58c107a1d9c3991d20a0bd9d88c34b0ca0964f1cf161e9ba3c9a375ac1"} Nov 25 09:57:51 crc kubenswrapper[4687]: I1125 09:57:51.637000 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jcbzc" podStartSLOduration=2.884906462 podStartE2EDuration="6.636980076s" podCreationTimestamp="2025-11-25 09:57:45 +0000 UTC" firstStartedPulling="2025-11-25 09:57:47.564446222 +0000 UTC m=+3262.618085930" lastFinishedPulling="2025-11-25 09:57:51.316519826 +0000 UTC m=+3266.370159544" observedRunningTime="2025-11-25 09:57:51.625921587 +0000 UTC m=+3266.679561315" watchObservedRunningTime="2025-11-25 09:57:51.636980076 +0000 UTC m=+3266.690619794" Nov 25 09:57:55 crc kubenswrapper[4687]: I1125 09:57:55.959072 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jcbzc" Nov 25 09:57:55 crc kubenswrapper[4687]: I1125 09:57:55.960563 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jcbzc" Nov 25 09:57:57 crc kubenswrapper[4687]: I1125 09:57:57.011900 4687 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-jcbzc" podUID="cb38e708-1b5b-4279-b52f-2814c1b276b0" containerName="registry-server" probeResult="failure" output=< Nov 25 09:57:57 crc kubenswrapper[4687]: timeout: failed to connect service ":50051" within 1s Nov 25 09:57:57 crc kubenswrapper[4687]: > Nov 25 09:58:04 crc kubenswrapper[4687]: I1125 09:58:04.734999 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 09:58:04 crc kubenswrapper[4687]: E1125 09:58:04.735722 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:58:06 crc kubenswrapper[4687]: I1125 09:58:06.003934 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jcbzc" Nov 25 09:58:06 crc kubenswrapper[4687]: I1125 09:58:06.059585 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jcbzc" Nov 25 09:58:06 crc kubenswrapper[4687]: I1125 09:58:06.244463 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jcbzc"] Nov 25 09:58:07 crc kubenswrapper[4687]: I1125 09:58:07.739142 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-jcbzc" podUID="cb38e708-1b5b-4279-b52f-2814c1b276b0" containerName="registry-server" containerID="cri-o://28ba8a58c107a1d9c3991d20a0bd9d88c34b0ca0964f1cf161e9ba3c9a375ac1" gracePeriod=2 Nov 25 09:58:08 crc 
kubenswrapper[4687]: E1125 09:58:08.004158 4687 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcb38e708_1b5b_4279_b52f_2814c1b276b0.slice/crio-conmon-28ba8a58c107a1d9c3991d20a0bd9d88c34b0ca0964f1cf161e9ba3c9a375ac1.scope\": RecentStats: unable to find data in memory cache]" Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.249779 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jcbzc" Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.277719 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlrjr\" (UniqueName: \"kubernetes.io/projected/cb38e708-1b5b-4279-b52f-2814c1b276b0-kube-api-access-hlrjr\") pod \"cb38e708-1b5b-4279-b52f-2814c1b276b0\" (UID: \"cb38e708-1b5b-4279-b52f-2814c1b276b0\") " Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.277779 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb38e708-1b5b-4279-b52f-2814c1b276b0-catalog-content\") pod \"cb38e708-1b5b-4279-b52f-2814c1b276b0\" (UID: \"cb38e708-1b5b-4279-b52f-2814c1b276b0\") " Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.278080 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb38e708-1b5b-4279-b52f-2814c1b276b0-utilities\") pod \"cb38e708-1b5b-4279-b52f-2814c1b276b0\" (UID: \"cb38e708-1b5b-4279-b52f-2814c1b276b0\") " Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.279004 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb38e708-1b5b-4279-b52f-2814c1b276b0-utilities" (OuterVolumeSpecName: "utilities") pod "cb38e708-1b5b-4279-b52f-2814c1b276b0" (UID: "cb38e708-1b5b-4279-b52f-2814c1b276b0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.285284 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb38e708-1b5b-4279-b52f-2814c1b276b0-kube-api-access-hlrjr" (OuterVolumeSpecName: "kube-api-access-hlrjr") pod "cb38e708-1b5b-4279-b52f-2814c1b276b0" (UID: "cb38e708-1b5b-4279-b52f-2814c1b276b0"). InnerVolumeSpecName "kube-api-access-hlrjr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.372933 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb38e708-1b5b-4279-b52f-2814c1b276b0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cb38e708-1b5b-4279-b52f-2814c1b276b0" (UID: "cb38e708-1b5b-4279-b52f-2814c1b276b0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.382122 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb38e708-1b5b-4279-b52f-2814c1b276b0-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.382172 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlrjr\" (UniqueName: \"kubernetes.io/projected/cb38e708-1b5b-4279-b52f-2814c1b276b0-kube-api-access-hlrjr\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.382186 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb38e708-1b5b-4279-b52f-2814c1b276b0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.754771 4687 generic.go:334] "Generic (PLEG): container finished" podID="cb38e708-1b5b-4279-b52f-2814c1b276b0" containerID="28ba8a58c107a1d9c3991d20a0bd9d88c34b0ca0964f1cf161e9ba3c9a375ac1" exitCode=0 Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.754854 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jcbzc" event={"ID":"cb38e708-1b5b-4279-b52f-2814c1b276b0","Type":"ContainerDied","Data":"28ba8a58c107a1d9c3991d20a0bd9d88c34b0ca0964f1cf161e9ba3c9a375ac1"} Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.754900 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jcbzc" event={"ID":"cb38e708-1b5b-4279-b52f-2814c1b276b0","Type":"ContainerDied","Data":"80ae0115dc49e4ab6dd99506081014a404a082f7ab480ea056d87ad5cda66424"} Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.754927 4687 scope.go:117] "RemoveContainer" containerID="28ba8a58c107a1d9c3991d20a0bd9d88c34b0ca0964f1cf161e9ba3c9a375ac1" Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.754932 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jcbzc" Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.780116 4687 scope.go:117] "RemoveContainer" containerID="e9f57c32b82e5972650398c729e91e2bb8e73259c9c9e570b919ea8380bfd624" Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.793547 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jcbzc"] Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.801799 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-jcbzc"] Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.821291 4687 scope.go:117] "RemoveContainer" containerID="0a5d003bdd56d3548db87bac52e1f4c1c66eafed6963b05391b3de1f6be52529" Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.854304 4687 scope.go:117] "RemoveContainer" containerID="28ba8a58c107a1d9c3991d20a0bd9d88c34b0ca0964f1cf161e9ba3c9a375ac1" Nov 25 09:58:08 crc kubenswrapper[4687]: E1125 09:58:08.855040 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28ba8a58c107a1d9c3991d20a0bd9d88c34b0ca0964f1cf161e9ba3c9a375ac1\": container with ID starting with 28ba8a58c107a1d9c3991d20a0bd9d88c34b0ca0964f1cf161e9ba3c9a375ac1 not found: ID does not exist" containerID="28ba8a58c107a1d9c3991d20a0bd9d88c34b0ca0964f1cf161e9ba3c9a375ac1" Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.855097 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28ba8a58c107a1d9c3991d20a0bd9d88c34b0ca0964f1cf161e9ba3c9a375ac1"} err="failed to get container status \"28ba8a58c107a1d9c3991d20a0bd9d88c34b0ca0964f1cf161e9ba3c9a375ac1\": rpc error: code = NotFound desc = could not find container \"28ba8a58c107a1d9c3991d20a0bd9d88c34b0ca0964f1cf161e9ba3c9a375ac1\": container with ID starting with 28ba8a58c107a1d9c3991d20a0bd9d88c34b0ca0964f1cf161e9ba3c9a375ac1 not found: ID does not exist" Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.855131 4687 scope.go:117] "RemoveContainer" containerID="e9f57c32b82e5972650398c729e91e2bb8e73259c9c9e570b919ea8380bfd624" Nov 25 09:58:08 crc kubenswrapper[4687]: E1125 09:58:08.855623 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e9f57c32b82e5972650398c729e91e2bb8e73259c9c9e570b919ea8380bfd624\": container with ID starting with e9f57c32b82e5972650398c729e91e2bb8e73259c9c9e570b919ea8380bfd624 not found: ID does not exist" containerID="e9f57c32b82e5972650398c729e91e2bb8e73259c9c9e570b919ea8380bfd624" Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.855663 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9f57c32b82e5972650398c729e91e2bb8e73259c9c9e570b919ea8380bfd624"} err="failed to get container status \"e9f57c32b82e5972650398c729e91e2bb8e73259c9c9e570b919ea8380bfd624\": rpc error: code = NotFound desc = could not find container \"e9f57c32b82e5972650398c729e91e2bb8e73259c9c9e570b919ea8380bfd624\": container with ID starting with e9f57c32b82e5972650398c729e91e2bb8e73259c9c9e570b919ea8380bfd624 not found: ID does not exist" Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.855691 4687 scope.go:117] "RemoveContainer" containerID="0a5d003bdd56d3548db87bac52e1f4c1c66eafed6963b05391b3de1f6be52529" Nov 25 09:58:08 crc kubenswrapper[4687]: E1125 09:58:08.856029 4687 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"0a5d003bdd56d3548db87bac52e1f4c1c66eafed6963b05391b3de1f6be52529\": container with ID starting with 0a5d003bdd56d3548db87bac52e1f4c1c66eafed6963b05391b3de1f6be52529 not found: ID does not exist" containerID="0a5d003bdd56d3548db87bac52e1f4c1c66eafed6963b05391b3de1f6be52529" Nov 25 09:58:08 crc kubenswrapper[4687]: I1125 09:58:08.856080 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a5d003bdd56d3548db87bac52e1f4c1c66eafed6963b05391b3de1f6be52529"} err="failed to get container status \"0a5d003bdd56d3548db87bac52e1f4c1c66eafed6963b05391b3de1f6be52529\": rpc error: code = NotFound desc = could not find container \"0a5d003bdd56d3548db87bac52e1f4c1c66eafed6963b05391b3de1f6be52529\": container with ID starting with 0a5d003bdd56d3548db87bac52e1f4c1c66eafed6963b05391b3de1f6be52529 not found: ID does not exist" Nov 25 09:58:09 crc kubenswrapper[4687]: I1125 09:58:09.751055 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb38e708-1b5b-4279-b52f-2814c1b276b0" path="/var/lib/kubelet/pods/cb38e708-1b5b-4279-b52f-2814c1b276b0/volumes" Nov 25 09:58:17 crc kubenswrapper[4687]: I1125 09:58:17.735193 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 09:58:17 crc kubenswrapper[4687]: E1125 09:58:17.735953 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:58:31 crc kubenswrapper[4687]: I1125 09:58:31.735126 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 09:58:31 crc kubenswrapper[4687]: E1125 09:58:31.735921 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:58:42 crc kubenswrapper[4687]: I1125 09:58:42.734676 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 09:58:42 crc kubenswrapper[4687]: E1125 09:58:42.735332 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:58:57 crc kubenswrapper[4687]: I1125 09:58:57.735375 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 09:58:57 crc kubenswrapper[4687]: E1125 09:58:57.736173 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:59:08 crc kubenswrapper[4687]: I1125 09:59:08.735164 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 09:59:08 crc kubenswrapper[4687]: E1125 09:59:08.736036 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:59:20 crc kubenswrapper[4687]: I1125 09:59:20.735624 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 09:59:20 crc kubenswrapper[4687]: E1125 09:59:20.736965 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:59:35 crc kubenswrapper[4687]: I1125 09:59:35.753714 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 09:59:35 crc kubenswrapper[4687]: E1125 09:59:35.754496 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 09:59:48 crc kubenswrapper[4687]: I1125 09:59:48.735100 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 09:59:48 crc kubenswrapper[4687]: E1125 09:59:48.736865 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:00:00 crc kubenswrapper[4687]: I1125 10:00:00.148326 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401080-wx9ch"] Nov 25 10:00:00 crc kubenswrapper[4687]: E1125 10:00:00.150355 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb38e708-1b5b-4279-b52f-2814c1b276b0" containerName="registry-server" Nov 25 10:00:00 crc kubenswrapper[4687]: I1125 10:00:00.150437 4687 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="cb38e708-1b5b-4279-b52f-2814c1b276b0" containerName="registry-server" Nov 25 10:00:00 crc kubenswrapper[4687]: E1125 10:00:00.150545 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb38e708-1b5b-4279-b52f-2814c1b276b0" containerName="extract-utilities" Nov 25 10:00:00 crc kubenswrapper[4687]: I1125 10:00:00.150614 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb38e708-1b5b-4279-b52f-2814c1b276b0" containerName="extract-utilities" Nov 25 10:00:00 crc kubenswrapper[4687]: E1125 10:00:00.150683 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb38e708-1b5b-4279-b52f-2814c1b276b0" containerName="extract-content" Nov 25 10:00:00 crc kubenswrapper[4687]: I1125 10:00:00.150739 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb38e708-1b5b-4279-b52f-2814c1b276b0" containerName="extract-content" Nov 25 10:00:00 crc kubenswrapper[4687]: I1125 10:00:00.150982 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb38e708-1b5b-4279-b52f-2814c1b276b0" containerName="registry-server" Nov 25 10:00:00 crc kubenswrapper[4687]: I1125 10:00:00.151720 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wx9ch" Nov 25 10:00:00 crc kubenswrapper[4687]: I1125 10:00:00.157261 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 10:00:00 crc kubenswrapper[4687]: I1125 10:00:00.157751 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 10:00:00 crc kubenswrapper[4687]: I1125 10:00:00.162364 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401080-wx9ch"] Nov 25 10:00:00 crc kubenswrapper[4687]: I1125 10:00:00.287831 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rz4kq\" (UniqueName: \"kubernetes.io/projected/604ed4d8-e608-4fc9-84c4-4bcf63026974-kube-api-access-rz4kq\") pod \"collect-profiles-29401080-wx9ch\" (UID: \"604ed4d8-e608-4fc9-84c4-4bcf63026974\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wx9ch" Nov 25 10:00:00 crc kubenswrapper[4687]: I1125 10:00:00.287912 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/604ed4d8-e608-4fc9-84c4-4bcf63026974-secret-volume\") pod \"collect-profiles-29401080-wx9ch\" (UID: \"604ed4d8-e608-4fc9-84c4-4bcf63026974\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wx9ch" Nov 25 10:00:00 crc kubenswrapper[4687]: I1125 10:00:00.288038 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/604ed4d8-e608-4fc9-84c4-4bcf63026974-config-volume\") pod \"collect-profiles-29401080-wx9ch\" (UID: \"604ed4d8-e608-4fc9-84c4-4bcf63026974\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wx9ch" Nov 25 10:00:00 crc kubenswrapper[4687]: I1125 10:00:00.389427 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/604ed4d8-e608-4fc9-84c4-4bcf63026974-secret-volume\") pod \"collect-profiles-29401080-wx9ch\" (UID: 
\"604ed4d8-e608-4fc9-84c4-4bcf63026974\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wx9ch" Nov 25 10:00:00 crc kubenswrapper[4687]: I1125 10:00:00.389589 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/604ed4d8-e608-4fc9-84c4-4bcf63026974-config-volume\") pod \"collect-profiles-29401080-wx9ch\" (UID: \"604ed4d8-e608-4fc9-84c4-4bcf63026974\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wx9ch" Nov 25 10:00:00 crc kubenswrapper[4687]: I1125 10:00:00.389731 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rz4kq\" (UniqueName: \"kubernetes.io/projected/604ed4d8-e608-4fc9-84c4-4bcf63026974-kube-api-access-rz4kq\") pod \"collect-profiles-29401080-wx9ch\" (UID: \"604ed4d8-e608-4fc9-84c4-4bcf63026974\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wx9ch" Nov 25 10:00:00 crc kubenswrapper[4687]: I1125 10:00:00.390658 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/604ed4d8-e608-4fc9-84c4-4bcf63026974-config-volume\") pod \"collect-profiles-29401080-wx9ch\" (UID: \"604ed4d8-e608-4fc9-84c4-4bcf63026974\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wx9ch" Nov 25 10:00:00 crc kubenswrapper[4687]: I1125 10:00:00.395234 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/604ed4d8-e608-4fc9-84c4-4bcf63026974-secret-volume\") pod \"collect-profiles-29401080-wx9ch\" (UID: \"604ed4d8-e608-4fc9-84c4-4bcf63026974\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wx9ch" Nov 25 10:00:00 crc kubenswrapper[4687]: I1125 10:00:00.408755 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rz4kq\" (UniqueName: \"kubernetes.io/projected/604ed4d8-e608-4fc9-84c4-4bcf63026974-kube-api-access-rz4kq\") pod \"collect-profiles-29401080-wx9ch\" (UID: \"604ed4d8-e608-4fc9-84c4-4bcf63026974\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wx9ch" Nov 25 10:00:00 crc kubenswrapper[4687]: I1125 10:00:00.472692 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wx9ch" Nov 25 10:00:00 crc kubenswrapper[4687]: I1125 10:00:00.933286 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401080-wx9ch"] Nov 25 10:00:01 crc kubenswrapper[4687]: I1125 10:00:01.315386 4687 generic.go:334] "Generic (PLEG): container finished" podID="604ed4d8-e608-4fc9-84c4-4bcf63026974" containerID="c7dfbe518e50a026ef07d5521311b77d8b37f4005affabd0514b5300d50e9cb3" exitCode=0 Nov 25 10:00:01 crc kubenswrapper[4687]: I1125 10:00:01.315545 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wx9ch" event={"ID":"604ed4d8-e608-4fc9-84c4-4bcf63026974","Type":"ContainerDied","Data":"c7dfbe518e50a026ef07d5521311b77d8b37f4005affabd0514b5300d50e9cb3"} Nov 25 10:00:01 crc kubenswrapper[4687]: I1125 10:00:01.315879 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wx9ch" event={"ID":"604ed4d8-e608-4fc9-84c4-4bcf63026974","Type":"ContainerStarted","Data":"68c00ecab17220e1e84f65e8f0fed3eae805dbae1e552e483fb64ecf0fe2d205"} Nov 25 10:00:02 crc kubenswrapper[4687]: I1125 10:00:02.730021 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wx9ch" Nov 25 10:00:02 crc kubenswrapper[4687]: I1125 10:00:02.734886 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 10:00:02 crc kubenswrapper[4687]: E1125 10:00:02.735324 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:00:02 crc kubenswrapper[4687]: I1125 10:00:02.848135 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/604ed4d8-e608-4fc9-84c4-4bcf63026974-config-volume\") pod \"604ed4d8-e608-4fc9-84c4-4bcf63026974\" (UID: \"604ed4d8-e608-4fc9-84c4-4bcf63026974\") " Nov 25 10:00:02 crc kubenswrapper[4687]: I1125 10:00:02.848217 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/604ed4d8-e608-4fc9-84c4-4bcf63026974-secret-volume\") pod \"604ed4d8-e608-4fc9-84c4-4bcf63026974\" (UID: \"604ed4d8-e608-4fc9-84c4-4bcf63026974\") " Nov 25 10:00:02 crc kubenswrapper[4687]: I1125 10:00:02.848254 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rz4kq\" (UniqueName: \"kubernetes.io/projected/604ed4d8-e608-4fc9-84c4-4bcf63026974-kube-api-access-rz4kq\") pod \"604ed4d8-e608-4fc9-84c4-4bcf63026974\" (UID: \"604ed4d8-e608-4fc9-84c4-4bcf63026974\") " Nov 25 10:00:02 crc kubenswrapper[4687]: I1125 10:00:02.849044 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/604ed4d8-e608-4fc9-84c4-4bcf63026974-config-volume" (OuterVolumeSpecName: "config-volume") pod "604ed4d8-e608-4fc9-84c4-4bcf63026974" (UID: "604ed4d8-e608-4fc9-84c4-4bcf63026974"). 
InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:00:02 crc kubenswrapper[4687]: I1125 10:00:02.866800 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/604ed4d8-e608-4fc9-84c4-4bcf63026974-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "604ed4d8-e608-4fc9-84c4-4bcf63026974" (UID: "604ed4d8-e608-4fc9-84c4-4bcf63026974"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:02 crc kubenswrapper[4687]: I1125 10:00:02.867314 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/604ed4d8-e608-4fc9-84c4-4bcf63026974-kube-api-access-rz4kq" (OuterVolumeSpecName: "kube-api-access-rz4kq") pod "604ed4d8-e608-4fc9-84c4-4bcf63026974" (UID: "604ed4d8-e608-4fc9-84c4-4bcf63026974"). InnerVolumeSpecName "kube-api-access-rz4kq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:00:02 crc kubenswrapper[4687]: I1125 10:00:02.951208 4687 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/604ed4d8-e608-4fc9-84c4-4bcf63026974-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:02 crc kubenswrapper[4687]: I1125 10:00:02.951250 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rz4kq\" (UniqueName: \"kubernetes.io/projected/604ed4d8-e608-4fc9-84c4-4bcf63026974-kube-api-access-rz4kq\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:02 crc kubenswrapper[4687]: I1125 10:00:02.951263 4687 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/604ed4d8-e608-4fc9-84c4-4bcf63026974-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:03 crc kubenswrapper[4687]: I1125 10:00:03.334174 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wx9ch" event={"ID":"604ed4d8-e608-4fc9-84c4-4bcf63026974","Type":"ContainerDied","Data":"68c00ecab17220e1e84f65e8f0fed3eae805dbae1e552e483fb64ecf0fe2d205"} Nov 25 10:00:03 crc kubenswrapper[4687]: I1125 10:00:03.334208 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="68c00ecab17220e1e84f65e8f0fed3eae805dbae1e552e483fb64ecf0fe2d205" Nov 25 10:00:03 crc kubenswrapper[4687]: I1125 10:00:03.334234 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wx9ch" Nov 25 10:00:03 crc kubenswrapper[4687]: I1125 10:00:03.808619 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401035-dkrvk"] Nov 25 10:00:03 crc kubenswrapper[4687]: I1125 10:00:03.817874 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401035-dkrvk"] Nov 25 10:00:05 crc kubenswrapper[4687]: I1125 10:00:05.747792 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5883acaa-0d39-4b0d-ac93-b746fdb143a9" path="/var/lib/kubelet/pods/5883acaa-0d39-4b0d-ac93-b746fdb143a9/volumes" Nov 25 10:00:14 crc kubenswrapper[4687]: I1125 10:00:14.734434 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 10:00:14 crc kubenswrapper[4687]: E1125 10:00:14.735169 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:00:29 crc kubenswrapper[4687]: I1125 10:00:29.735831 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 10:00:30 crc kubenswrapper[4687]: I1125 10:00:30.588987 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerStarted","Data":"664d21be4e75ad4b02c7ff82884f9c15b596c7e753f3267df4c71202095aa0a7"} Nov 25 10:00:52 crc kubenswrapper[4687]: I1125 10:00:52.634816 4687 scope.go:117] "RemoveContainer" containerID="d24ee0c9a9df572297e7bfea8eeea9f97bf6370a766d31bfcb34e54342146249" Nov 25 10:01:00 crc kubenswrapper[4687]: I1125 10:01:00.193060 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29401081-f9gz7"] Nov 25 10:01:00 crc kubenswrapper[4687]: E1125 10:01:00.194179 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="604ed4d8-e608-4fc9-84c4-4bcf63026974" containerName="collect-profiles" Nov 25 10:01:00 crc kubenswrapper[4687]: I1125 10:01:00.194198 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="604ed4d8-e608-4fc9-84c4-4bcf63026974" containerName="collect-profiles" Nov 25 10:01:00 crc kubenswrapper[4687]: I1125 10:01:00.194427 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="604ed4d8-e608-4fc9-84c4-4bcf63026974" containerName="collect-profiles" Nov 25 10:01:00 crc kubenswrapper[4687]: I1125 10:01:00.195228 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29401081-f9gz7" Nov 25 10:01:00 crc kubenswrapper[4687]: I1125 10:01:00.209264 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401081-f9gz7"] Nov 25 10:01:00 crc kubenswrapper[4687]: I1125 10:01:00.220400 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bpdvw\" (UniqueName: \"kubernetes.io/projected/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-kube-api-access-bpdvw\") pod \"keystone-cron-29401081-f9gz7\" (UID: \"0fce13d2-b073-4d13-ae67-6a4a079ae3f1\") " pod="openstack/keystone-cron-29401081-f9gz7" Nov 25 10:01:00 crc kubenswrapper[4687]: I1125 10:01:00.220560 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-config-data\") pod \"keystone-cron-29401081-f9gz7\" (UID: \"0fce13d2-b073-4d13-ae67-6a4a079ae3f1\") " pod="openstack/keystone-cron-29401081-f9gz7" Nov 25 10:01:00 crc kubenswrapper[4687]: I1125 10:01:00.220598 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-combined-ca-bundle\") pod \"keystone-cron-29401081-f9gz7\" (UID: \"0fce13d2-b073-4d13-ae67-6a4a079ae3f1\") " pod="openstack/keystone-cron-29401081-f9gz7" Nov 25 10:01:00 crc kubenswrapper[4687]: I1125 10:01:00.220717 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-fernet-keys\") pod \"keystone-cron-29401081-f9gz7\" (UID: \"0fce13d2-b073-4d13-ae67-6a4a079ae3f1\") " pod="openstack/keystone-cron-29401081-f9gz7" Nov 25 10:01:00 crc kubenswrapper[4687]: I1125 10:01:00.323407 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bpdvw\" (UniqueName: \"kubernetes.io/projected/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-kube-api-access-bpdvw\") pod \"keystone-cron-29401081-f9gz7\" (UID: \"0fce13d2-b073-4d13-ae67-6a4a079ae3f1\") " pod="openstack/keystone-cron-29401081-f9gz7" Nov 25 10:01:00 crc kubenswrapper[4687]: I1125 10:01:00.323564 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-config-data\") pod \"keystone-cron-29401081-f9gz7\" (UID: \"0fce13d2-b073-4d13-ae67-6a4a079ae3f1\") " pod="openstack/keystone-cron-29401081-f9gz7" Nov 25 10:01:00 crc kubenswrapper[4687]: I1125 10:01:00.323603 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-combined-ca-bundle\") pod \"keystone-cron-29401081-f9gz7\" (UID: \"0fce13d2-b073-4d13-ae67-6a4a079ae3f1\") " pod="openstack/keystone-cron-29401081-f9gz7" Nov 25 10:01:00 crc kubenswrapper[4687]: I1125 10:01:00.323724 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-fernet-keys\") pod \"keystone-cron-29401081-f9gz7\" (UID: \"0fce13d2-b073-4d13-ae67-6a4a079ae3f1\") " pod="openstack/keystone-cron-29401081-f9gz7" Nov 25 10:01:00 crc kubenswrapper[4687]: I1125 10:01:00.329944 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-combined-ca-bundle\") pod \"keystone-cron-29401081-f9gz7\" (UID: \"0fce13d2-b073-4d13-ae67-6a4a079ae3f1\") " pod="openstack/keystone-cron-29401081-f9gz7" Nov 25 10:01:00 crc kubenswrapper[4687]: I1125 10:01:00.330103 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-fernet-keys\") pod \"keystone-cron-29401081-f9gz7\" (UID: \"0fce13d2-b073-4d13-ae67-6a4a079ae3f1\") " pod="openstack/keystone-cron-29401081-f9gz7" Nov 25 10:01:00 crc kubenswrapper[4687]: I1125 10:01:00.334568 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-config-data\") pod \"keystone-cron-29401081-f9gz7\" (UID: \"0fce13d2-b073-4d13-ae67-6a4a079ae3f1\") " pod="openstack/keystone-cron-29401081-f9gz7" Nov 25 10:01:00 crc kubenswrapper[4687]: I1125 10:01:00.344778 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bpdvw\" (UniqueName: \"kubernetes.io/projected/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-kube-api-access-bpdvw\") pod \"keystone-cron-29401081-f9gz7\" (UID: \"0fce13d2-b073-4d13-ae67-6a4a079ae3f1\") " pod="openstack/keystone-cron-29401081-f9gz7" Nov 25 10:01:00 crc kubenswrapper[4687]: I1125 10:01:00.514893 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401081-f9gz7" Nov 25 10:01:00 crc kubenswrapper[4687]: I1125 10:01:00.986202 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401081-f9gz7"] Nov 25 10:01:01 crc kubenswrapper[4687]: I1125 10:01:01.904870 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401081-f9gz7" event={"ID":"0fce13d2-b073-4d13-ae67-6a4a079ae3f1","Type":"ContainerStarted","Data":"20bc8bfab563c8331b24fc27a9a7652a3522db0624c084c4783d1cd8d612bc22"} Nov 25 10:01:01 crc kubenswrapper[4687]: I1125 10:01:01.905201 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401081-f9gz7" event={"ID":"0fce13d2-b073-4d13-ae67-6a4a079ae3f1","Type":"ContainerStarted","Data":"17226e47787c1620d078e0268096f46b8a75232874467d2a40b5927c10f732eb"} Nov 25 10:01:01 crc kubenswrapper[4687]: I1125 10:01:01.940416 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29401081-f9gz7" podStartSLOduration=1.9403927680000002 podStartE2EDuration="1.940392768s" podCreationTimestamp="2025-11-25 10:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:01:01.930227882 +0000 UTC m=+3456.983867600" watchObservedRunningTime="2025-11-25 10:01:01.940392768 +0000 UTC m=+3456.994032486" Nov 25 10:01:03 crc kubenswrapper[4687]: I1125 10:01:03.933089 4687 generic.go:334] "Generic (PLEG): container finished" podID="0fce13d2-b073-4d13-ae67-6a4a079ae3f1" containerID="20bc8bfab563c8331b24fc27a9a7652a3522db0624c084c4783d1cd8d612bc22" exitCode=0 Nov 25 10:01:03 crc kubenswrapper[4687]: I1125 10:01:03.933265 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401081-f9gz7" event={"ID":"0fce13d2-b073-4d13-ae67-6a4a079ae3f1","Type":"ContainerDied","Data":"20bc8bfab563c8331b24fc27a9a7652a3522db0624c084c4783d1cd8d612bc22"} Nov 25 10:01:05 crc 
kubenswrapper[4687]: I1125 10:01:05.303029 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401081-f9gz7" Nov 25 10:01:05 crc kubenswrapper[4687]: I1125 10:01:05.430267 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-fernet-keys\") pod \"0fce13d2-b073-4d13-ae67-6a4a079ae3f1\" (UID: \"0fce13d2-b073-4d13-ae67-6a4a079ae3f1\") " Nov 25 10:01:05 crc kubenswrapper[4687]: I1125 10:01:05.430407 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-combined-ca-bundle\") pod \"0fce13d2-b073-4d13-ae67-6a4a079ae3f1\" (UID: \"0fce13d2-b073-4d13-ae67-6a4a079ae3f1\") " Nov 25 10:01:05 crc kubenswrapper[4687]: I1125 10:01:05.430491 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-config-data\") pod \"0fce13d2-b073-4d13-ae67-6a4a079ae3f1\" (UID: \"0fce13d2-b073-4d13-ae67-6a4a079ae3f1\") " Nov 25 10:01:05 crc kubenswrapper[4687]: I1125 10:01:05.430617 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bpdvw\" (UniqueName: \"kubernetes.io/projected/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-kube-api-access-bpdvw\") pod \"0fce13d2-b073-4d13-ae67-6a4a079ae3f1\" (UID: \"0fce13d2-b073-4d13-ae67-6a4a079ae3f1\") " Nov 25 10:01:05 crc kubenswrapper[4687]: I1125 10:01:05.437163 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-kube-api-access-bpdvw" (OuterVolumeSpecName: "kube-api-access-bpdvw") pod "0fce13d2-b073-4d13-ae67-6a4a079ae3f1" (UID: "0fce13d2-b073-4d13-ae67-6a4a079ae3f1"). InnerVolumeSpecName "kube-api-access-bpdvw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:05 crc kubenswrapper[4687]: I1125 10:01:05.437699 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "0fce13d2-b073-4d13-ae67-6a4a079ae3f1" (UID: "0fce13d2-b073-4d13-ae67-6a4a079ae3f1"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:05 crc kubenswrapper[4687]: I1125 10:01:05.470630 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0fce13d2-b073-4d13-ae67-6a4a079ae3f1" (UID: "0fce13d2-b073-4d13-ae67-6a4a079ae3f1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:05 crc kubenswrapper[4687]: I1125 10:01:05.491130 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-config-data" (OuterVolumeSpecName: "config-data") pod "0fce13d2-b073-4d13-ae67-6a4a079ae3f1" (UID: "0fce13d2-b073-4d13-ae67-6a4a079ae3f1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:05 crc kubenswrapper[4687]: I1125 10:01:05.533636 4687 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:05 crc kubenswrapper[4687]: I1125 10:01:05.533684 4687 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:05 crc kubenswrapper[4687]: I1125 10:01:05.533704 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:05 crc kubenswrapper[4687]: I1125 10:01:05.533717 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bpdvw\" (UniqueName: \"kubernetes.io/projected/0fce13d2-b073-4d13-ae67-6a4a079ae3f1-kube-api-access-bpdvw\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:05 crc kubenswrapper[4687]: I1125 10:01:05.951627 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401081-f9gz7" event={"ID":"0fce13d2-b073-4d13-ae67-6a4a079ae3f1","Type":"ContainerDied","Data":"17226e47787c1620d078e0268096f46b8a75232874467d2a40b5927c10f732eb"} Nov 25 10:01:05 crc kubenswrapper[4687]: I1125 10:01:05.951676 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="17226e47787c1620d078e0268096f46b8a75232874467d2a40b5927c10f732eb" Nov 25 10:01:05 crc kubenswrapper[4687]: I1125 10:01:05.951673 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401081-f9gz7" Nov 25 10:01:15 crc kubenswrapper[4687]: I1125 10:01:15.375948 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kw5c8"] Nov 25 10:01:15 crc kubenswrapper[4687]: E1125 10:01:15.378075 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fce13d2-b073-4d13-ae67-6a4a079ae3f1" containerName="keystone-cron" Nov 25 10:01:15 crc kubenswrapper[4687]: I1125 10:01:15.378178 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fce13d2-b073-4d13-ae67-6a4a079ae3f1" containerName="keystone-cron" Nov 25 10:01:15 crc kubenswrapper[4687]: I1125 10:01:15.378488 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fce13d2-b073-4d13-ae67-6a4a079ae3f1" containerName="keystone-cron" Nov 25 10:01:15 crc kubenswrapper[4687]: I1125 10:01:15.380161 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kw5c8" Nov 25 10:01:15 crc kubenswrapper[4687]: I1125 10:01:15.393001 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kw5c8"] Nov 25 10:01:15 crc kubenswrapper[4687]: I1125 10:01:15.528153 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dd86fd7-c458-46ae-b11c-8d81136f1148-catalog-content\") pod \"redhat-marketplace-kw5c8\" (UID: \"3dd86fd7-c458-46ae-b11c-8d81136f1148\") " pod="openshift-marketplace/redhat-marketplace-kw5c8" Nov 25 10:01:15 crc kubenswrapper[4687]: I1125 10:01:15.528357 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dd86fd7-c458-46ae-b11c-8d81136f1148-utilities\") pod \"redhat-marketplace-kw5c8\" (UID: \"3dd86fd7-c458-46ae-b11c-8d81136f1148\") " pod="openshift-marketplace/redhat-marketplace-kw5c8" Nov 25 10:01:15 crc kubenswrapper[4687]: I1125 10:01:15.528412 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thhrj\" (UniqueName: \"kubernetes.io/projected/3dd86fd7-c458-46ae-b11c-8d81136f1148-kube-api-access-thhrj\") pod \"redhat-marketplace-kw5c8\" (UID: \"3dd86fd7-c458-46ae-b11c-8d81136f1148\") " pod="openshift-marketplace/redhat-marketplace-kw5c8" Nov 25 10:01:15 crc kubenswrapper[4687]: I1125 10:01:15.630282 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dd86fd7-c458-46ae-b11c-8d81136f1148-utilities\") pod \"redhat-marketplace-kw5c8\" (UID: \"3dd86fd7-c458-46ae-b11c-8d81136f1148\") " pod="openshift-marketplace/redhat-marketplace-kw5c8" Nov 25 10:01:15 crc kubenswrapper[4687]: I1125 10:01:15.630354 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thhrj\" (UniqueName: \"kubernetes.io/projected/3dd86fd7-c458-46ae-b11c-8d81136f1148-kube-api-access-thhrj\") pod \"redhat-marketplace-kw5c8\" (UID: \"3dd86fd7-c458-46ae-b11c-8d81136f1148\") " pod="openshift-marketplace/redhat-marketplace-kw5c8" Nov 25 10:01:15 crc kubenswrapper[4687]: I1125 10:01:15.630422 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dd86fd7-c458-46ae-b11c-8d81136f1148-catalog-content\") pod \"redhat-marketplace-kw5c8\" (UID: \"3dd86fd7-c458-46ae-b11c-8d81136f1148\") " pod="openshift-marketplace/redhat-marketplace-kw5c8" Nov 25 10:01:15 crc kubenswrapper[4687]: I1125 10:01:15.630948 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dd86fd7-c458-46ae-b11c-8d81136f1148-catalog-content\") pod \"redhat-marketplace-kw5c8\" (UID: \"3dd86fd7-c458-46ae-b11c-8d81136f1148\") " pod="openshift-marketplace/redhat-marketplace-kw5c8" Nov 25 10:01:15 crc kubenswrapper[4687]: I1125 10:01:15.631185 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dd86fd7-c458-46ae-b11c-8d81136f1148-utilities\") pod \"redhat-marketplace-kw5c8\" (UID: \"3dd86fd7-c458-46ae-b11c-8d81136f1148\") " pod="openshift-marketplace/redhat-marketplace-kw5c8" Nov 25 10:01:15 crc kubenswrapper[4687]: I1125 10:01:15.662728 4687 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-thhrj\" (UniqueName: \"kubernetes.io/projected/3dd86fd7-c458-46ae-b11c-8d81136f1148-kube-api-access-thhrj\") pod \"redhat-marketplace-kw5c8\" (UID: \"3dd86fd7-c458-46ae-b11c-8d81136f1148\") " pod="openshift-marketplace/redhat-marketplace-kw5c8" Nov 25 10:01:15 crc kubenswrapper[4687]: I1125 10:01:15.716041 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kw5c8" Nov 25 10:01:16 crc kubenswrapper[4687]: I1125 10:01:16.219880 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kw5c8"] Nov 25 10:01:16 crc kubenswrapper[4687]: W1125 10:01:16.222683 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dd86fd7_c458_46ae_b11c_8d81136f1148.slice/crio-9e9ede26c1725fc0a62b4664e27d20f6980960b76d71ca25668b6d3dd6aa496b WatchSource:0}: Error finding container 9e9ede26c1725fc0a62b4664e27d20f6980960b76d71ca25668b6d3dd6aa496b: Status 404 returned error can't find the container with id 9e9ede26c1725fc0a62b4664e27d20f6980960b76d71ca25668b6d3dd6aa496b Nov 25 10:01:17 crc kubenswrapper[4687]: I1125 10:01:17.059356 4687 generic.go:334] "Generic (PLEG): container finished" podID="3dd86fd7-c458-46ae-b11c-8d81136f1148" containerID="f2df2de15b7410663e394f3a2d7404a01715c548874f40b2c6c7c0db771f6156" exitCode=0 Nov 25 10:01:17 crc kubenswrapper[4687]: I1125 10:01:17.059430 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kw5c8" event={"ID":"3dd86fd7-c458-46ae-b11c-8d81136f1148","Type":"ContainerDied","Data":"f2df2de15b7410663e394f3a2d7404a01715c548874f40b2c6c7c0db771f6156"} Nov 25 10:01:17 crc kubenswrapper[4687]: I1125 10:01:17.059666 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kw5c8" event={"ID":"3dd86fd7-c458-46ae-b11c-8d81136f1148","Type":"ContainerStarted","Data":"9e9ede26c1725fc0a62b4664e27d20f6980960b76d71ca25668b6d3dd6aa496b"} Nov 25 10:01:19 crc kubenswrapper[4687]: I1125 10:01:19.096282 4687 generic.go:334] "Generic (PLEG): container finished" podID="3dd86fd7-c458-46ae-b11c-8d81136f1148" containerID="85f2d8c893ebecee940ae4e5d4a2eceec6b16c02c06fad88a4586654a35f15f5" exitCode=0 Nov 25 10:01:19 crc kubenswrapper[4687]: I1125 10:01:19.096394 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kw5c8" event={"ID":"3dd86fd7-c458-46ae-b11c-8d81136f1148","Type":"ContainerDied","Data":"85f2d8c893ebecee940ae4e5d4a2eceec6b16c02c06fad88a4586654a35f15f5"} Nov 25 10:01:20 crc kubenswrapper[4687]: I1125 10:01:20.111298 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kw5c8" event={"ID":"3dd86fd7-c458-46ae-b11c-8d81136f1148","Type":"ContainerStarted","Data":"3996cf600fb2a844aaaf73870e00f10788aad2ba3c998376f4dd590e444f11c2"} Nov 25 10:01:20 crc kubenswrapper[4687]: I1125 10:01:20.136688 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kw5c8" podStartSLOduration=2.401435305 podStartE2EDuration="5.136660132s" podCreationTimestamp="2025-11-25 10:01:15 +0000 UTC" firstStartedPulling="2025-11-25 10:01:17.061329821 +0000 UTC m=+3472.114969539" lastFinishedPulling="2025-11-25 10:01:19.796554648 +0000 UTC m=+3474.850194366" observedRunningTime="2025-11-25 10:01:20.129022935 +0000 UTC m=+3475.182662653" 
watchObservedRunningTime="2025-11-25 10:01:20.136660132 +0000 UTC m=+3475.190299850" Nov 25 10:01:25 crc kubenswrapper[4687]: I1125 10:01:25.717522 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kw5c8" Nov 25 10:01:25 crc kubenswrapper[4687]: I1125 10:01:25.718135 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kw5c8" Nov 25 10:01:25 crc kubenswrapper[4687]: I1125 10:01:25.801599 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kw5c8" Nov 25 10:01:26 crc kubenswrapper[4687]: I1125 10:01:26.233547 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kw5c8" Nov 25 10:01:26 crc kubenswrapper[4687]: I1125 10:01:26.292073 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qxvk7"] Nov 25 10:01:26 crc kubenswrapper[4687]: I1125 10:01:26.295570 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qxvk7" Nov 25 10:01:26 crc kubenswrapper[4687]: I1125 10:01:26.301451 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qxvk7"] Nov 25 10:01:26 crc kubenswrapper[4687]: I1125 10:01:26.466655 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6m586\" (UniqueName: \"kubernetes.io/projected/4cbdad2b-6deb-419b-94b3-72669433b276-kube-api-access-6m586\") pod \"certified-operators-qxvk7\" (UID: \"4cbdad2b-6deb-419b-94b3-72669433b276\") " pod="openshift-marketplace/certified-operators-qxvk7" Nov 25 10:01:26 crc kubenswrapper[4687]: I1125 10:01:26.466732 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4cbdad2b-6deb-419b-94b3-72669433b276-catalog-content\") pod \"certified-operators-qxvk7\" (UID: \"4cbdad2b-6deb-419b-94b3-72669433b276\") " pod="openshift-marketplace/certified-operators-qxvk7" Nov 25 10:01:26 crc kubenswrapper[4687]: I1125 10:01:26.466759 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4cbdad2b-6deb-419b-94b3-72669433b276-utilities\") pod \"certified-operators-qxvk7\" (UID: \"4cbdad2b-6deb-419b-94b3-72669433b276\") " pod="openshift-marketplace/certified-operators-qxvk7" Nov 25 10:01:26 crc kubenswrapper[4687]: I1125 10:01:26.568225 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6m586\" (UniqueName: \"kubernetes.io/projected/4cbdad2b-6deb-419b-94b3-72669433b276-kube-api-access-6m586\") pod \"certified-operators-qxvk7\" (UID: \"4cbdad2b-6deb-419b-94b3-72669433b276\") " pod="openshift-marketplace/certified-operators-qxvk7" Nov 25 10:01:26 crc kubenswrapper[4687]: I1125 10:01:26.568306 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4cbdad2b-6deb-419b-94b3-72669433b276-catalog-content\") pod \"certified-operators-qxvk7\" (UID: \"4cbdad2b-6deb-419b-94b3-72669433b276\") " pod="openshift-marketplace/certified-operators-qxvk7" Nov 25 10:01:26 crc kubenswrapper[4687]: I1125 10:01:26.568321 4687 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4cbdad2b-6deb-419b-94b3-72669433b276-utilities\") pod \"certified-operators-qxvk7\" (UID: \"4cbdad2b-6deb-419b-94b3-72669433b276\") " pod="openshift-marketplace/certified-operators-qxvk7" Nov 25 10:01:26 crc kubenswrapper[4687]: I1125 10:01:26.568908 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4cbdad2b-6deb-419b-94b3-72669433b276-utilities\") pod \"certified-operators-qxvk7\" (UID: \"4cbdad2b-6deb-419b-94b3-72669433b276\") " pod="openshift-marketplace/certified-operators-qxvk7" Nov 25 10:01:26 crc kubenswrapper[4687]: I1125 10:01:26.569239 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4cbdad2b-6deb-419b-94b3-72669433b276-catalog-content\") pod \"certified-operators-qxvk7\" (UID: \"4cbdad2b-6deb-419b-94b3-72669433b276\") " pod="openshift-marketplace/certified-operators-qxvk7" Nov 25 10:01:26 crc kubenswrapper[4687]: I1125 10:01:26.603938 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6m586\" (UniqueName: \"kubernetes.io/projected/4cbdad2b-6deb-419b-94b3-72669433b276-kube-api-access-6m586\") pod \"certified-operators-qxvk7\" (UID: \"4cbdad2b-6deb-419b-94b3-72669433b276\") " pod="openshift-marketplace/certified-operators-qxvk7" Nov 25 10:01:26 crc kubenswrapper[4687]: I1125 10:01:26.633213 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qxvk7" Nov 25 10:01:27 crc kubenswrapper[4687]: I1125 10:01:27.139756 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qxvk7"] Nov 25 10:01:27 crc kubenswrapper[4687]: W1125 10:01:27.148624 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4cbdad2b_6deb_419b_94b3_72669433b276.slice/crio-99932b489e6c8a01e7cb7bf37d7d2fa4587acccb785571bbdc2d3a0fd13233a6 WatchSource:0}: Error finding container 99932b489e6c8a01e7cb7bf37d7d2fa4587acccb785571bbdc2d3a0fd13233a6: Status 404 returned error can't find the container with id 99932b489e6c8a01e7cb7bf37d7d2fa4587acccb785571bbdc2d3a0fd13233a6 Nov 25 10:01:27 crc kubenswrapper[4687]: I1125 10:01:27.183801 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qxvk7" event={"ID":"4cbdad2b-6deb-419b-94b3-72669433b276","Type":"ContainerStarted","Data":"99932b489e6c8a01e7cb7bf37d7d2fa4587acccb785571bbdc2d3a0fd13233a6"} Nov 25 10:01:28 crc kubenswrapper[4687]: I1125 10:01:28.195736 4687 generic.go:334] "Generic (PLEG): container finished" podID="4cbdad2b-6deb-419b-94b3-72669433b276" containerID="4a35dea7d3ea60250acca5efb78783e377ebb67cd7e929f2aee1dcedf84f7abc" exitCode=0 Nov 25 10:01:28 crc kubenswrapper[4687]: I1125 10:01:28.195848 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qxvk7" event={"ID":"4cbdad2b-6deb-419b-94b3-72669433b276","Type":"ContainerDied","Data":"4a35dea7d3ea60250acca5efb78783e377ebb67cd7e929f2aee1dcedf84f7abc"} Nov 25 10:01:29 crc kubenswrapper[4687]: I1125 10:01:29.207110 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qxvk7" 
event={"ID":"4cbdad2b-6deb-419b-94b3-72669433b276","Type":"ContainerStarted","Data":"6950f73c2b69ba374a0989a5432ca49d9eb16b85ac0603b6d618096bf51e0067"} Nov 25 10:01:30 crc kubenswrapper[4687]: I1125 10:01:30.067784 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kw5c8"] Nov 25 10:01:30 crc kubenswrapper[4687]: I1125 10:01:30.068140 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-kw5c8" podUID="3dd86fd7-c458-46ae-b11c-8d81136f1148" containerName="registry-server" containerID="cri-o://3996cf600fb2a844aaaf73870e00f10788aad2ba3c998376f4dd590e444f11c2" gracePeriod=2 Nov 25 10:01:30 crc kubenswrapper[4687]: I1125 10:01:30.218453 4687 generic.go:334] "Generic (PLEG): container finished" podID="3dd86fd7-c458-46ae-b11c-8d81136f1148" containerID="3996cf600fb2a844aaaf73870e00f10788aad2ba3c998376f4dd590e444f11c2" exitCode=0 Nov 25 10:01:30 crc kubenswrapper[4687]: I1125 10:01:30.218550 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kw5c8" event={"ID":"3dd86fd7-c458-46ae-b11c-8d81136f1148","Type":"ContainerDied","Data":"3996cf600fb2a844aaaf73870e00f10788aad2ba3c998376f4dd590e444f11c2"} Nov 25 10:01:30 crc kubenswrapper[4687]: I1125 10:01:30.220999 4687 generic.go:334] "Generic (PLEG): container finished" podID="4cbdad2b-6deb-419b-94b3-72669433b276" containerID="6950f73c2b69ba374a0989a5432ca49d9eb16b85ac0603b6d618096bf51e0067" exitCode=0 Nov 25 10:01:30 crc kubenswrapper[4687]: I1125 10:01:30.221043 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qxvk7" event={"ID":"4cbdad2b-6deb-419b-94b3-72669433b276","Type":"ContainerDied","Data":"6950f73c2b69ba374a0989a5432ca49d9eb16b85ac0603b6d618096bf51e0067"} Nov 25 10:01:30 crc kubenswrapper[4687]: I1125 10:01:30.577682 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kw5c8" Nov 25 10:01:30 crc kubenswrapper[4687]: I1125 10:01:30.758332 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dd86fd7-c458-46ae-b11c-8d81136f1148-utilities\") pod \"3dd86fd7-c458-46ae-b11c-8d81136f1148\" (UID: \"3dd86fd7-c458-46ae-b11c-8d81136f1148\") " Nov 25 10:01:30 crc kubenswrapper[4687]: I1125 10:01:30.758442 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-thhrj\" (UniqueName: \"kubernetes.io/projected/3dd86fd7-c458-46ae-b11c-8d81136f1148-kube-api-access-thhrj\") pod \"3dd86fd7-c458-46ae-b11c-8d81136f1148\" (UID: \"3dd86fd7-c458-46ae-b11c-8d81136f1148\") " Nov 25 10:01:30 crc kubenswrapper[4687]: I1125 10:01:30.758544 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dd86fd7-c458-46ae-b11c-8d81136f1148-catalog-content\") pod \"3dd86fd7-c458-46ae-b11c-8d81136f1148\" (UID: \"3dd86fd7-c458-46ae-b11c-8d81136f1148\") " Nov 25 10:01:30 crc kubenswrapper[4687]: I1125 10:01:30.759256 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3dd86fd7-c458-46ae-b11c-8d81136f1148-utilities" (OuterVolumeSpecName: "utilities") pod "3dd86fd7-c458-46ae-b11c-8d81136f1148" (UID: "3dd86fd7-c458-46ae-b11c-8d81136f1148"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:01:30 crc kubenswrapper[4687]: I1125 10:01:30.764744 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3dd86fd7-c458-46ae-b11c-8d81136f1148-kube-api-access-thhrj" (OuterVolumeSpecName: "kube-api-access-thhrj") pod "3dd86fd7-c458-46ae-b11c-8d81136f1148" (UID: "3dd86fd7-c458-46ae-b11c-8d81136f1148"). InnerVolumeSpecName "kube-api-access-thhrj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:30 crc kubenswrapper[4687]: I1125 10:01:30.786679 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3dd86fd7-c458-46ae-b11c-8d81136f1148-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3dd86fd7-c458-46ae-b11c-8d81136f1148" (UID: "3dd86fd7-c458-46ae-b11c-8d81136f1148"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:01:30 crc kubenswrapper[4687]: I1125 10:01:30.860789 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dd86fd7-c458-46ae-b11c-8d81136f1148-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:30 crc kubenswrapper[4687]: I1125 10:01:30.860838 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-thhrj\" (UniqueName: \"kubernetes.io/projected/3dd86fd7-c458-46ae-b11c-8d81136f1148-kube-api-access-thhrj\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:30 crc kubenswrapper[4687]: I1125 10:01:30.860854 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dd86fd7-c458-46ae-b11c-8d81136f1148-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:31 crc kubenswrapper[4687]: I1125 10:01:31.232249 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kw5c8" event={"ID":"3dd86fd7-c458-46ae-b11c-8d81136f1148","Type":"ContainerDied","Data":"9e9ede26c1725fc0a62b4664e27d20f6980960b76d71ca25668b6d3dd6aa496b"} Nov 25 10:01:31 crc kubenswrapper[4687]: I1125 10:01:31.232279 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kw5c8" Nov 25 10:01:31 crc kubenswrapper[4687]: I1125 10:01:31.232315 4687 scope.go:117] "RemoveContainer" containerID="3996cf600fb2a844aaaf73870e00f10788aad2ba3c998376f4dd590e444f11c2" Nov 25 10:01:31 crc kubenswrapper[4687]: I1125 10:01:31.234677 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qxvk7" event={"ID":"4cbdad2b-6deb-419b-94b3-72669433b276","Type":"ContainerStarted","Data":"9d8f9d12043203d52ce387e05b7182595439e9fccc4e5583e97d1b56a193b696"} Nov 25 10:01:31 crc kubenswrapper[4687]: I1125 10:01:31.250615 4687 scope.go:117] "RemoveContainer" containerID="85f2d8c893ebecee940ae4e5d4a2eceec6b16c02c06fad88a4586654a35f15f5" Nov 25 10:01:31 crc kubenswrapper[4687]: I1125 10:01:31.286437 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qxvk7" podStartSLOduration=2.825591932 podStartE2EDuration="5.286418858s" podCreationTimestamp="2025-11-25 10:01:26 +0000 UTC" firstStartedPulling="2025-11-25 10:01:28.198261199 +0000 UTC m=+3483.251900917" lastFinishedPulling="2025-11-25 10:01:30.659088125 +0000 UTC m=+3485.712727843" observedRunningTime="2025-11-25 10:01:31.269764657 +0000 UTC m=+3486.323404375" watchObservedRunningTime="2025-11-25 10:01:31.286418858 +0000 UTC m=+3486.340058586" Nov 25 10:01:31 crc kubenswrapper[4687]: I1125 10:01:31.293352 4687 scope.go:117] "RemoveContainer" containerID="f2df2de15b7410663e394f3a2d7404a01715c548874f40b2c6c7c0db771f6156" Nov 25 10:01:31 crc kubenswrapper[4687]: I1125 10:01:31.297730 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kw5c8"] Nov 25 10:01:31 crc kubenswrapper[4687]: I1125 10:01:31.307488 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-kw5c8"] Nov 25 10:01:31 crc kubenswrapper[4687]: I1125 10:01:31.747865 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3dd86fd7-c458-46ae-b11c-8d81136f1148" path="/var/lib/kubelet/pods/3dd86fd7-c458-46ae-b11c-8d81136f1148/volumes" Nov 25 10:01:36 crc kubenswrapper[4687]: I1125 10:01:36.634256 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qxvk7" Nov 25 10:01:36 crc kubenswrapper[4687]: I1125 10:01:36.634885 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qxvk7" Nov 25 10:01:36 crc kubenswrapper[4687]: I1125 10:01:36.681847 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qxvk7" Nov 25 10:01:37 crc kubenswrapper[4687]: I1125 10:01:37.363961 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qxvk7" Nov 25 10:01:38 crc kubenswrapper[4687]: I1125 10:01:38.873703 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qxvk7"] Nov 25 10:01:39 crc kubenswrapper[4687]: I1125 10:01:39.334554 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qxvk7" podUID="4cbdad2b-6deb-419b-94b3-72669433b276" containerName="registry-server" containerID="cri-o://9d8f9d12043203d52ce387e05b7182595439e9fccc4e5583e97d1b56a193b696" gracePeriod=2 Nov 25 10:01:40 crc kubenswrapper[4687]: I1125 10:01:40.347292 4687 
generic.go:334] "Generic (PLEG): container finished" podID="4cbdad2b-6deb-419b-94b3-72669433b276" containerID="9d8f9d12043203d52ce387e05b7182595439e9fccc4e5583e97d1b56a193b696" exitCode=0 Nov 25 10:01:40 crc kubenswrapper[4687]: I1125 10:01:40.347374 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qxvk7" event={"ID":"4cbdad2b-6deb-419b-94b3-72669433b276","Type":"ContainerDied","Data":"9d8f9d12043203d52ce387e05b7182595439e9fccc4e5583e97d1b56a193b696"} Nov 25 10:01:40 crc kubenswrapper[4687]: I1125 10:01:40.347542 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qxvk7" event={"ID":"4cbdad2b-6deb-419b-94b3-72669433b276","Type":"ContainerDied","Data":"99932b489e6c8a01e7cb7bf37d7d2fa4587acccb785571bbdc2d3a0fd13233a6"} Nov 25 10:01:40 crc kubenswrapper[4687]: I1125 10:01:40.347559 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="99932b489e6c8a01e7cb7bf37d7d2fa4587acccb785571bbdc2d3a0fd13233a6" Nov 25 10:01:40 crc kubenswrapper[4687]: I1125 10:01:40.370934 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qxvk7" Nov 25 10:01:40 crc kubenswrapper[4687]: I1125 10:01:40.549797 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4cbdad2b-6deb-419b-94b3-72669433b276-catalog-content\") pod \"4cbdad2b-6deb-419b-94b3-72669433b276\" (UID: \"4cbdad2b-6deb-419b-94b3-72669433b276\") " Nov 25 10:01:40 crc kubenswrapper[4687]: I1125 10:01:40.549857 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6m586\" (UniqueName: \"kubernetes.io/projected/4cbdad2b-6deb-419b-94b3-72669433b276-kube-api-access-6m586\") pod \"4cbdad2b-6deb-419b-94b3-72669433b276\" (UID: \"4cbdad2b-6deb-419b-94b3-72669433b276\") " Nov 25 10:01:40 crc kubenswrapper[4687]: I1125 10:01:40.550044 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4cbdad2b-6deb-419b-94b3-72669433b276-utilities\") pod \"4cbdad2b-6deb-419b-94b3-72669433b276\" (UID: \"4cbdad2b-6deb-419b-94b3-72669433b276\") " Nov 25 10:01:40 crc kubenswrapper[4687]: I1125 10:01:40.551180 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4cbdad2b-6deb-419b-94b3-72669433b276-utilities" (OuterVolumeSpecName: "utilities") pod "4cbdad2b-6deb-419b-94b3-72669433b276" (UID: "4cbdad2b-6deb-419b-94b3-72669433b276"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:01:40 crc kubenswrapper[4687]: I1125 10:01:40.557513 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cbdad2b-6deb-419b-94b3-72669433b276-kube-api-access-6m586" (OuterVolumeSpecName: "kube-api-access-6m586") pod "4cbdad2b-6deb-419b-94b3-72669433b276" (UID: "4cbdad2b-6deb-419b-94b3-72669433b276"). InnerVolumeSpecName "kube-api-access-6m586". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:40 crc kubenswrapper[4687]: I1125 10:01:40.596735 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4cbdad2b-6deb-419b-94b3-72669433b276-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4cbdad2b-6deb-419b-94b3-72669433b276" (UID: "4cbdad2b-6deb-419b-94b3-72669433b276"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:01:40 crc kubenswrapper[4687]: I1125 10:01:40.652695 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4cbdad2b-6deb-419b-94b3-72669433b276-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:40 crc kubenswrapper[4687]: I1125 10:01:40.652964 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4cbdad2b-6deb-419b-94b3-72669433b276-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:40 crc kubenswrapper[4687]: I1125 10:01:40.653045 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6m586\" (UniqueName: \"kubernetes.io/projected/4cbdad2b-6deb-419b-94b3-72669433b276-kube-api-access-6m586\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:41 crc kubenswrapper[4687]: I1125 10:01:41.358482 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qxvk7" Nov 25 10:01:41 crc kubenswrapper[4687]: I1125 10:01:41.393110 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qxvk7"] Nov 25 10:01:41 crc kubenswrapper[4687]: I1125 10:01:41.403435 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qxvk7"] Nov 25 10:01:41 crc kubenswrapper[4687]: I1125 10:01:41.748619 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4cbdad2b-6deb-419b-94b3-72669433b276" path="/var/lib/kubelet/pods/4cbdad2b-6deb-419b-94b3-72669433b276/volumes" Nov 25 10:02:53 crc kubenswrapper[4687]: I1125 10:02:53.844730 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:02:53 crc kubenswrapper[4687]: I1125 10:02:53.845231 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:03:23 crc kubenswrapper[4687]: I1125 10:03:23.844538 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:03:23 crc kubenswrapper[4687]: I1125 10:03:23.845101 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:03:39 crc kubenswrapper[4687]: I1125 10:03:39.789491 4687 generic.go:334] "Generic (PLEG): container finished" podID="f60c7882-f90a-4cfd-93a4-1cf51c29315a" containerID="4a5aa9f8af56508ffb87235cdc35f71cc5e581c521b486d0125c19eea9a56024" exitCode=0 Nov 25 10:03:39 crc kubenswrapper[4687]: I1125 10:03:39.789608 4687 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"f60c7882-f90a-4cfd-93a4-1cf51c29315a","Type":"ContainerDied","Data":"4a5aa9f8af56508ffb87235cdc35f71cc5e581c521b486d0125c19eea9a56024"} Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.151310 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.248065 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jnkg9\" (UniqueName: \"kubernetes.io/projected/f60c7882-f90a-4cfd-93a4-1cf51c29315a-kube-api-access-jnkg9\") pod \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.248542 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/f60c7882-f90a-4cfd-93a4-1cf51c29315a-test-operator-ephemeral-temporary\") pod \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.248623 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f60c7882-f90a-4cfd-93a4-1cf51c29315a-openstack-config\") pod \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.248704 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.248831 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f60c7882-f90a-4cfd-93a4-1cf51c29315a-openstack-config-secret\") pod \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.248875 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/f60c7882-f90a-4cfd-93a4-1cf51c29315a-test-operator-ephemeral-workdir\") pod \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.248908 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f60c7882-f90a-4cfd-93a4-1cf51c29315a-ssh-key\") pod \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.249050 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f60c7882-f90a-4cfd-93a4-1cf51c29315a-config-data\") pod \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.249163 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/f60c7882-f90a-4cfd-93a4-1cf51c29315a-ca-certs\") pod 
\"f60c7882-f90a-4cfd-93a4-1cf51c29315a\" (UID: \"f60c7882-f90a-4cfd-93a4-1cf51c29315a\") " Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.250016 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f60c7882-f90a-4cfd-93a4-1cf51c29315a-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "f60c7882-f90a-4cfd-93a4-1cf51c29315a" (UID: "f60c7882-f90a-4cfd-93a4-1cf51c29315a"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.250191 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f60c7882-f90a-4cfd-93a4-1cf51c29315a-config-data" (OuterVolumeSpecName: "config-data") pod "f60c7882-f90a-4cfd-93a4-1cf51c29315a" (UID: "f60c7882-f90a-4cfd-93a4-1cf51c29315a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.254718 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "test-operator-logs") pod "f60c7882-f90a-4cfd-93a4-1cf51c29315a" (UID: "f60c7882-f90a-4cfd-93a4-1cf51c29315a"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.256697 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f60c7882-f90a-4cfd-93a4-1cf51c29315a-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "f60c7882-f90a-4cfd-93a4-1cf51c29315a" (UID: "f60c7882-f90a-4cfd-93a4-1cf51c29315a"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.256854 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f60c7882-f90a-4cfd-93a4-1cf51c29315a-kube-api-access-jnkg9" (OuterVolumeSpecName: "kube-api-access-jnkg9") pod "f60c7882-f90a-4cfd-93a4-1cf51c29315a" (UID: "f60c7882-f90a-4cfd-93a4-1cf51c29315a"). InnerVolumeSpecName "kube-api-access-jnkg9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.275368 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f60c7882-f90a-4cfd-93a4-1cf51c29315a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "f60c7882-f90a-4cfd-93a4-1cf51c29315a" (UID: "f60c7882-f90a-4cfd-93a4-1cf51c29315a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.283837 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f60c7882-f90a-4cfd-93a4-1cf51c29315a-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "f60c7882-f90a-4cfd-93a4-1cf51c29315a" (UID: "f60c7882-f90a-4cfd-93a4-1cf51c29315a"). InnerVolumeSpecName "ca-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.284934 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f60c7882-f90a-4cfd-93a4-1cf51c29315a-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "f60c7882-f90a-4cfd-93a4-1cf51c29315a" (UID: "f60c7882-f90a-4cfd-93a4-1cf51c29315a"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.312995 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f60c7882-f90a-4cfd-93a4-1cf51c29315a-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "f60c7882-f90a-4cfd-93a4-1cf51c29315a" (UID: "f60c7882-f90a-4cfd-93a4-1cf51c29315a"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.352224 4687 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/f60c7882-f90a-4cfd-93a4-1cf51c29315a-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.352555 4687 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f60c7882-f90a-4cfd-93a4-1cf51c29315a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.352688 4687 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f60c7882-f90a-4cfd-93a4-1cf51c29315a-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.352804 4687 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/f60c7882-f90a-4cfd-93a4-1cf51c29315a-ca-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.352919 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jnkg9\" (UniqueName: \"kubernetes.io/projected/f60c7882-f90a-4cfd-93a4-1cf51c29315a-kube-api-access-jnkg9\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.353059 4687 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/f60c7882-f90a-4cfd-93a4-1cf51c29315a-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.353174 4687 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f60c7882-f90a-4cfd-93a4-1cf51c29315a-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.353344 4687 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.353471 4687 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f60c7882-f90a-4cfd-93a4-1cf51c29315a-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.373526 4687 operation_generator.go:917] UnmountDevice succeeded for 
volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.455343 4687 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.813110 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"f60c7882-f90a-4cfd-93a4-1cf51c29315a","Type":"ContainerDied","Data":"9df11e75322447f742e9fb589e9d640c7691191af2d000d74667d0911b42704f"} Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.813150 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9df11e75322447f742e9fb589e9d640c7691191af2d000d74667d0911b42704f" Nov 25 10:03:41 crc kubenswrapper[4687]: I1125 10:03:41.813206 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.475396 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 25 10:03:45 crc kubenswrapper[4687]: E1125 10:03:45.476947 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dd86fd7-c458-46ae-b11c-8d81136f1148" containerName="extract-content" Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.476974 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dd86fd7-c458-46ae-b11c-8d81136f1148" containerName="extract-content" Nov 25 10:03:45 crc kubenswrapper[4687]: E1125 10:03:45.477009 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cbdad2b-6deb-419b-94b3-72669433b276" containerName="extract-utilities" Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.477019 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cbdad2b-6deb-419b-94b3-72669433b276" containerName="extract-utilities" Nov 25 10:03:45 crc kubenswrapper[4687]: E1125 10:03:45.477036 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dd86fd7-c458-46ae-b11c-8d81136f1148" containerName="extract-utilities" Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.477047 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dd86fd7-c458-46ae-b11c-8d81136f1148" containerName="extract-utilities" Nov 25 10:03:45 crc kubenswrapper[4687]: E1125 10:03:45.477073 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f60c7882-f90a-4cfd-93a4-1cf51c29315a" containerName="tempest-tests-tempest-tests-runner" Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.477083 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="f60c7882-f90a-4cfd-93a4-1cf51c29315a" containerName="tempest-tests-tempest-tests-runner" Nov 25 10:03:45 crc kubenswrapper[4687]: E1125 10:03:45.477112 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dd86fd7-c458-46ae-b11c-8d81136f1148" containerName="registry-server" Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.477121 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dd86fd7-c458-46ae-b11c-8d81136f1148" containerName="registry-server" Nov 25 10:03:45 crc kubenswrapper[4687]: E1125 10:03:45.477130 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cbdad2b-6deb-419b-94b3-72669433b276" containerName="extract-content" Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.477137 4687 
state_mem.go:107] "Deleted CPUSet assignment" podUID="4cbdad2b-6deb-419b-94b3-72669433b276" containerName="extract-content" Nov 25 10:03:45 crc kubenswrapper[4687]: E1125 10:03:45.477158 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cbdad2b-6deb-419b-94b3-72669433b276" containerName="registry-server" Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.477168 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cbdad2b-6deb-419b-94b3-72669433b276" containerName="registry-server" Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.477399 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="3dd86fd7-c458-46ae-b11c-8d81136f1148" containerName="registry-server" Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.477421 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cbdad2b-6deb-419b-94b3-72669433b276" containerName="registry-server" Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.477442 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="f60c7882-f90a-4cfd-93a4-1cf51c29315a" containerName="tempest-tests-tempest-tests-runner" Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.478273 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.480623 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-w2rpv" Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.487377 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.537453 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5799d\" (UniqueName: \"kubernetes.io/projected/b6d8db87-1023-4e20-99ca-6a755dc19fe3-kube-api-access-5799d\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"b6d8db87-1023-4e20-99ca-6a755dc19fe3\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.537554 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"b6d8db87-1023-4e20-99ca-6a755dc19fe3\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.638878 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5799d\" (UniqueName: \"kubernetes.io/projected/b6d8db87-1023-4e20-99ca-6a755dc19fe3-kube-api-access-5799d\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"b6d8db87-1023-4e20-99ca-6a755dc19fe3\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.638923 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"b6d8db87-1023-4e20-99ca-6a755dc19fe3\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.639325 4687 
operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"b6d8db87-1023-4e20-99ca-6a755dc19fe3\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.659566 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5799d\" (UniqueName: \"kubernetes.io/projected/b6d8db87-1023-4e20-99ca-6a755dc19fe3-kube-api-access-5799d\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"b6d8db87-1023-4e20-99ca-6a755dc19fe3\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.663752 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"b6d8db87-1023-4e20-99ca-6a755dc19fe3\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 10:03:45 crc kubenswrapper[4687]: I1125 10:03:45.805910 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 10:03:46 crc kubenswrapper[4687]: I1125 10:03:46.267192 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 25 10:03:46 crc kubenswrapper[4687]: I1125 10:03:46.280261 4687 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:03:46 crc kubenswrapper[4687]: I1125 10:03:46.876190 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"b6d8db87-1023-4e20-99ca-6a755dc19fe3","Type":"ContainerStarted","Data":"f0ed0ccf6b638b01c5633668382a98627d01a45f078713c7e7071642bdc95a4e"} Nov 25 10:03:47 crc kubenswrapper[4687]: I1125 10:03:47.887345 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"b6d8db87-1023-4e20-99ca-6a755dc19fe3","Type":"ContainerStarted","Data":"8d6fb69dcc525746025944a43f8824c9a751706383a029f1c31efd540d97b628"} Nov 25 10:03:47 crc kubenswrapper[4687]: I1125 10:03:47.905132 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=1.904177464 podStartE2EDuration="2.905105469s" podCreationTimestamp="2025-11-25 10:03:45 +0000 UTC" firstStartedPulling="2025-11-25 10:03:46.279784509 +0000 UTC m=+3621.333424247" lastFinishedPulling="2025-11-25 10:03:47.280712534 +0000 UTC m=+3622.334352252" observedRunningTime="2025-11-25 10:03:47.902253011 +0000 UTC m=+3622.955892769" watchObservedRunningTime="2025-11-25 10:03:47.905105469 +0000 UTC m=+3622.958745227" Nov 25 10:03:53 crc kubenswrapper[4687]: I1125 10:03:53.845077 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:03:53 crc kubenswrapper[4687]: I1125 10:03:53.845741 4687 
prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:03:53 crc kubenswrapper[4687]: I1125 10:03:53.845809 4687 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 10:03:53 crc kubenswrapper[4687]: I1125 10:03:53.846937 4687 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"664d21be4e75ad4b02c7ff82884f9c15b596c7e753f3267df4c71202095aa0a7"} pod="openshift-machine-config-operator/machine-config-daemon-vcqct" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:03:53 crc kubenswrapper[4687]: I1125 10:03:53.847042 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" containerID="cri-o://664d21be4e75ad4b02c7ff82884f9c15b596c7e753f3267df4c71202095aa0a7" gracePeriod=600 Nov 25 10:03:54 crc kubenswrapper[4687]: I1125 10:03:54.969064 4687 generic.go:334] "Generic (PLEG): container finished" podID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerID="664d21be4e75ad4b02c7ff82884f9c15b596c7e753f3267df4c71202095aa0a7" exitCode=0 Nov 25 10:03:54 crc kubenswrapper[4687]: I1125 10:03:54.969140 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerDied","Data":"664d21be4e75ad4b02c7ff82884f9c15b596c7e753f3267df4c71202095aa0a7"} Nov 25 10:03:54 crc kubenswrapper[4687]: I1125 10:03:54.969683 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerStarted","Data":"0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9"} Nov 25 10:03:54 crc kubenswrapper[4687]: I1125 10:03:54.969723 4687 scope.go:117] "RemoveContainer" containerID="9bf1e771dbee0adf32beeaba77cccb7568fea00e733087dc4d3127b37da82625" Nov 25 10:04:10 crc kubenswrapper[4687]: I1125 10:04:10.528852 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rw8df/must-gather-9gb4x"] Nov 25 10:04:10 crc kubenswrapper[4687]: I1125 10:04:10.531014 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rw8df/must-gather-9gb4x" Nov 25 10:04:10 crc kubenswrapper[4687]: I1125 10:04:10.533116 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-rw8df"/"openshift-service-ca.crt" Nov 25 10:04:10 crc kubenswrapper[4687]: I1125 10:04:10.533213 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-rw8df"/"default-dockercfg-z669q" Nov 25 10:04:10 crc kubenswrapper[4687]: I1125 10:04:10.533913 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-rw8df"/"kube-root-ca.crt" Nov 25 10:04:10 crc kubenswrapper[4687]: I1125 10:04:10.539203 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-rw8df/must-gather-9gb4x"] Nov 25 10:04:10 crc kubenswrapper[4687]: I1125 10:04:10.641170 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/bc9d9172-75a3-4c5b-b0ec-255bc67b4161-must-gather-output\") pod \"must-gather-9gb4x\" (UID: \"bc9d9172-75a3-4c5b-b0ec-255bc67b4161\") " pod="openshift-must-gather-rw8df/must-gather-9gb4x" Nov 25 10:04:10 crc kubenswrapper[4687]: I1125 10:04:10.641385 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2pr4\" (UniqueName: \"kubernetes.io/projected/bc9d9172-75a3-4c5b-b0ec-255bc67b4161-kube-api-access-r2pr4\") pod \"must-gather-9gb4x\" (UID: \"bc9d9172-75a3-4c5b-b0ec-255bc67b4161\") " pod="openshift-must-gather-rw8df/must-gather-9gb4x" Nov 25 10:04:10 crc kubenswrapper[4687]: I1125 10:04:10.743477 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/bc9d9172-75a3-4c5b-b0ec-255bc67b4161-must-gather-output\") pod \"must-gather-9gb4x\" (UID: \"bc9d9172-75a3-4c5b-b0ec-255bc67b4161\") " pod="openshift-must-gather-rw8df/must-gather-9gb4x" Nov 25 10:04:10 crc kubenswrapper[4687]: I1125 10:04:10.744082 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/bc9d9172-75a3-4c5b-b0ec-255bc67b4161-must-gather-output\") pod \"must-gather-9gb4x\" (UID: \"bc9d9172-75a3-4c5b-b0ec-255bc67b4161\") " pod="openshift-must-gather-rw8df/must-gather-9gb4x" Nov 25 10:04:10 crc kubenswrapper[4687]: I1125 10:04:10.744110 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2pr4\" (UniqueName: \"kubernetes.io/projected/bc9d9172-75a3-4c5b-b0ec-255bc67b4161-kube-api-access-r2pr4\") pod \"must-gather-9gb4x\" (UID: \"bc9d9172-75a3-4c5b-b0ec-255bc67b4161\") " pod="openshift-must-gather-rw8df/must-gather-9gb4x" Nov 25 10:04:10 crc kubenswrapper[4687]: I1125 10:04:10.762359 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2pr4\" (UniqueName: \"kubernetes.io/projected/bc9d9172-75a3-4c5b-b0ec-255bc67b4161-kube-api-access-r2pr4\") pod \"must-gather-9gb4x\" (UID: \"bc9d9172-75a3-4c5b-b0ec-255bc67b4161\") " pod="openshift-must-gather-rw8df/must-gather-9gb4x" Nov 25 10:04:10 crc kubenswrapper[4687]: I1125 10:04:10.853090 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rw8df/must-gather-9gb4x" Nov 25 10:04:11 crc kubenswrapper[4687]: I1125 10:04:11.335664 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-rw8df/must-gather-9gb4x"] Nov 25 10:04:12 crc kubenswrapper[4687]: I1125 10:04:12.161382 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rw8df/must-gather-9gb4x" event={"ID":"bc9d9172-75a3-4c5b-b0ec-255bc67b4161","Type":"ContainerStarted","Data":"bb731c597ee3c31385df3996cfba7ee2e24444dc0cb64f8b40112d8046b7a0ea"} Nov 25 10:04:17 crc kubenswrapper[4687]: I1125 10:04:17.234236 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rw8df/must-gather-9gb4x" event={"ID":"bc9d9172-75a3-4c5b-b0ec-255bc67b4161","Type":"ContainerStarted","Data":"7b4307aea17d186b65618435bd26b7260031971493391e4d259be17f253013fc"} Nov 25 10:04:17 crc kubenswrapper[4687]: I1125 10:04:17.234832 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rw8df/must-gather-9gb4x" event={"ID":"bc9d9172-75a3-4c5b-b0ec-255bc67b4161","Type":"ContainerStarted","Data":"ea72f561a01420d45a34ff94a7fc7a13aa0101a79a52e8d4106e0f6598b15162"} Nov 25 10:04:17 crc kubenswrapper[4687]: I1125 10:04:17.266202 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-rw8df/must-gather-9gb4x" podStartSLOduration=2.053779555 podStartE2EDuration="7.26617395s" podCreationTimestamp="2025-11-25 10:04:10 +0000 UTC" firstStartedPulling="2025-11-25 10:04:11.344076428 +0000 UTC m=+3646.397716146" lastFinishedPulling="2025-11-25 10:04:16.556470823 +0000 UTC m=+3651.610110541" observedRunningTime="2025-11-25 10:04:17.257989869 +0000 UTC m=+3652.311629597" watchObservedRunningTime="2025-11-25 10:04:17.26617395 +0000 UTC m=+3652.319813708" Nov 25 10:04:20 crc kubenswrapper[4687]: I1125 10:04:20.610793 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rw8df/crc-debug-wj44b"] Nov 25 10:04:20 crc kubenswrapper[4687]: I1125 10:04:20.612415 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rw8df/crc-debug-wj44b" Nov 25 10:04:20 crc kubenswrapper[4687]: I1125 10:04:20.747362 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a0a4461b-04d1-4372-9c77-da42e6e3b177-host\") pod \"crc-debug-wj44b\" (UID: \"a0a4461b-04d1-4372-9c77-da42e6e3b177\") " pod="openshift-must-gather-rw8df/crc-debug-wj44b" Nov 25 10:04:20 crc kubenswrapper[4687]: I1125 10:04:20.747416 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xt8w8\" (UniqueName: \"kubernetes.io/projected/a0a4461b-04d1-4372-9c77-da42e6e3b177-kube-api-access-xt8w8\") pod \"crc-debug-wj44b\" (UID: \"a0a4461b-04d1-4372-9c77-da42e6e3b177\") " pod="openshift-must-gather-rw8df/crc-debug-wj44b" Nov 25 10:04:20 crc kubenswrapper[4687]: I1125 10:04:20.849546 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a0a4461b-04d1-4372-9c77-da42e6e3b177-host\") pod \"crc-debug-wj44b\" (UID: \"a0a4461b-04d1-4372-9c77-da42e6e3b177\") " pod="openshift-must-gather-rw8df/crc-debug-wj44b" Nov 25 10:04:20 crc kubenswrapper[4687]: I1125 10:04:20.850061 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a0a4461b-04d1-4372-9c77-da42e6e3b177-host\") pod \"crc-debug-wj44b\" (UID: \"a0a4461b-04d1-4372-9c77-da42e6e3b177\") " pod="openshift-must-gather-rw8df/crc-debug-wj44b" Nov 25 10:04:20 crc kubenswrapper[4687]: I1125 10:04:20.850115 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xt8w8\" (UniqueName: \"kubernetes.io/projected/a0a4461b-04d1-4372-9c77-da42e6e3b177-kube-api-access-xt8w8\") pod \"crc-debug-wj44b\" (UID: \"a0a4461b-04d1-4372-9c77-da42e6e3b177\") " pod="openshift-must-gather-rw8df/crc-debug-wj44b" Nov 25 10:04:20 crc kubenswrapper[4687]: I1125 10:04:20.878276 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xt8w8\" (UniqueName: \"kubernetes.io/projected/a0a4461b-04d1-4372-9c77-da42e6e3b177-kube-api-access-xt8w8\") pod \"crc-debug-wj44b\" (UID: \"a0a4461b-04d1-4372-9c77-da42e6e3b177\") " pod="openshift-must-gather-rw8df/crc-debug-wj44b" Nov 25 10:04:20 crc kubenswrapper[4687]: I1125 10:04:20.938008 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rw8df/crc-debug-wj44b" Nov 25 10:04:21 crc kubenswrapper[4687]: I1125 10:04:21.282543 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rw8df/crc-debug-wj44b" event={"ID":"a0a4461b-04d1-4372-9c77-da42e6e3b177","Type":"ContainerStarted","Data":"abed4400609ebc80ef84d1af0d5f4ea6fb38ca525878dbbee86314df30063ac7"} Nov 25 10:04:33 crc kubenswrapper[4687]: I1125 10:04:33.404040 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rw8df/crc-debug-wj44b" event={"ID":"a0a4461b-04d1-4372-9c77-da42e6e3b177","Type":"ContainerStarted","Data":"196ddcefbc40879a83e813dac6aaa76d579f65bada46b75698ae545d90b781ee"} Nov 25 10:04:33 crc kubenswrapper[4687]: I1125 10:04:33.433033 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-rw8df/crc-debug-wj44b" podStartSLOduration=1.905166616 podStartE2EDuration="13.433015213s" podCreationTimestamp="2025-11-25 10:04:20 +0000 UTC" firstStartedPulling="2025-11-25 10:04:20.970314651 +0000 UTC m=+3656.023954359" lastFinishedPulling="2025-11-25 10:04:32.498163238 +0000 UTC m=+3667.551802956" observedRunningTime="2025-11-25 10:04:33.420326869 +0000 UTC m=+3668.473966587" watchObservedRunningTime="2025-11-25 10:04:33.433015213 +0000 UTC m=+3668.486654931" Nov 25 10:05:17 crc kubenswrapper[4687]: I1125 10:05:17.841104 4687 generic.go:334] "Generic (PLEG): container finished" podID="a0a4461b-04d1-4372-9c77-da42e6e3b177" containerID="196ddcefbc40879a83e813dac6aaa76d579f65bada46b75698ae545d90b781ee" exitCode=0 Nov 25 10:05:17 crc kubenswrapper[4687]: I1125 10:05:17.841228 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rw8df/crc-debug-wj44b" event={"ID":"a0a4461b-04d1-4372-9c77-da42e6e3b177","Type":"ContainerDied","Data":"196ddcefbc40879a83e813dac6aaa76d579f65bada46b75698ae545d90b781ee"} Nov 25 10:05:18 crc kubenswrapper[4687]: I1125 10:05:18.973080 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rw8df/crc-debug-wj44b" Nov 25 10:05:19 crc kubenswrapper[4687]: I1125 10:05:19.007547 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rw8df/crc-debug-wj44b"] Nov 25 10:05:19 crc kubenswrapper[4687]: I1125 10:05:19.017153 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rw8df/crc-debug-wj44b"] Nov 25 10:05:19 crc kubenswrapper[4687]: I1125 10:05:19.130989 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a0a4461b-04d1-4372-9c77-da42e6e3b177-host\") pod \"a0a4461b-04d1-4372-9c77-da42e6e3b177\" (UID: \"a0a4461b-04d1-4372-9c77-da42e6e3b177\") " Nov 25 10:05:19 crc kubenswrapper[4687]: I1125 10:05:19.131368 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xt8w8\" (UniqueName: \"kubernetes.io/projected/a0a4461b-04d1-4372-9c77-da42e6e3b177-kube-api-access-xt8w8\") pod \"a0a4461b-04d1-4372-9c77-da42e6e3b177\" (UID: \"a0a4461b-04d1-4372-9c77-da42e6e3b177\") " Nov 25 10:05:19 crc kubenswrapper[4687]: I1125 10:05:19.131115 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a0a4461b-04d1-4372-9c77-da42e6e3b177-host" (OuterVolumeSpecName: "host") pod "a0a4461b-04d1-4372-9c77-da42e6e3b177" (UID: "a0a4461b-04d1-4372-9c77-da42e6e3b177"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:05:19 crc kubenswrapper[4687]: I1125 10:05:19.132170 4687 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a0a4461b-04d1-4372-9c77-da42e6e3b177-host\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:19 crc kubenswrapper[4687]: I1125 10:05:19.137228 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0a4461b-04d1-4372-9c77-da42e6e3b177-kube-api-access-xt8w8" (OuterVolumeSpecName: "kube-api-access-xt8w8") pod "a0a4461b-04d1-4372-9c77-da42e6e3b177" (UID: "a0a4461b-04d1-4372-9c77-da42e6e3b177"). InnerVolumeSpecName "kube-api-access-xt8w8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:19 crc kubenswrapper[4687]: I1125 10:05:19.234521 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xt8w8\" (UniqueName: \"kubernetes.io/projected/a0a4461b-04d1-4372-9c77-da42e6e3b177-kube-api-access-xt8w8\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:19 crc kubenswrapper[4687]: I1125 10:05:19.748081 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0a4461b-04d1-4372-9c77-da42e6e3b177" path="/var/lib/kubelet/pods/a0a4461b-04d1-4372-9c77-da42e6e3b177/volumes" Nov 25 10:05:19 crc kubenswrapper[4687]: I1125 10:05:19.860780 4687 scope.go:117] "RemoveContainer" containerID="196ddcefbc40879a83e813dac6aaa76d579f65bada46b75698ae545d90b781ee" Nov 25 10:05:19 crc kubenswrapper[4687]: I1125 10:05:19.860817 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rw8df/crc-debug-wj44b" Nov 25 10:05:20 crc kubenswrapper[4687]: I1125 10:05:20.165444 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rw8df/crc-debug-xd4sv"] Nov 25 10:05:20 crc kubenswrapper[4687]: E1125 10:05:20.165962 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0a4461b-04d1-4372-9c77-da42e6e3b177" containerName="container-00" Nov 25 10:05:20 crc kubenswrapper[4687]: I1125 10:05:20.165978 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0a4461b-04d1-4372-9c77-da42e6e3b177" containerName="container-00" Nov 25 10:05:20 crc kubenswrapper[4687]: I1125 10:05:20.166209 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0a4461b-04d1-4372-9c77-da42e6e3b177" containerName="container-00" Nov 25 10:05:20 crc kubenswrapper[4687]: I1125 10:05:20.166867 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rw8df/crc-debug-xd4sv" Nov 25 10:05:20 crc kubenswrapper[4687]: I1125 10:05:20.358118 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgrl6\" (UniqueName: \"kubernetes.io/projected/73d7e557-d965-4e91-b187-a7b3a7eef893-kube-api-access-bgrl6\") pod \"crc-debug-xd4sv\" (UID: \"73d7e557-d965-4e91-b187-a7b3a7eef893\") " pod="openshift-must-gather-rw8df/crc-debug-xd4sv" Nov 25 10:05:20 crc kubenswrapper[4687]: I1125 10:05:20.358325 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/73d7e557-d965-4e91-b187-a7b3a7eef893-host\") pod \"crc-debug-xd4sv\" (UID: \"73d7e557-d965-4e91-b187-a7b3a7eef893\") " pod="openshift-must-gather-rw8df/crc-debug-xd4sv" Nov 25 10:05:20 crc kubenswrapper[4687]: I1125 10:05:20.460555 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgrl6\" (UniqueName: \"kubernetes.io/projected/73d7e557-d965-4e91-b187-a7b3a7eef893-kube-api-access-bgrl6\") pod \"crc-debug-xd4sv\" (UID: \"73d7e557-d965-4e91-b187-a7b3a7eef893\") " pod="openshift-must-gather-rw8df/crc-debug-xd4sv" Nov 25 10:05:20 crc kubenswrapper[4687]: I1125 10:05:20.460777 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/73d7e557-d965-4e91-b187-a7b3a7eef893-host\") pod \"crc-debug-xd4sv\" (UID: \"73d7e557-d965-4e91-b187-a7b3a7eef893\") " pod="openshift-must-gather-rw8df/crc-debug-xd4sv" Nov 25 10:05:20 crc kubenswrapper[4687]: I1125 10:05:20.460904 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/73d7e557-d965-4e91-b187-a7b3a7eef893-host\") pod \"crc-debug-xd4sv\" (UID: \"73d7e557-d965-4e91-b187-a7b3a7eef893\") " pod="openshift-must-gather-rw8df/crc-debug-xd4sv" Nov 25 10:05:20 crc kubenswrapper[4687]: I1125 10:05:20.484038 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgrl6\" (UniqueName: \"kubernetes.io/projected/73d7e557-d965-4e91-b187-a7b3a7eef893-kube-api-access-bgrl6\") pod \"crc-debug-xd4sv\" (UID: \"73d7e557-d965-4e91-b187-a7b3a7eef893\") " pod="openshift-must-gather-rw8df/crc-debug-xd4sv" Nov 25 10:05:20 crc kubenswrapper[4687]: I1125 10:05:20.491867 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rw8df/crc-debug-xd4sv" Nov 25 10:05:20 crc kubenswrapper[4687]: I1125 10:05:20.869718 4687 generic.go:334] "Generic (PLEG): container finished" podID="73d7e557-d965-4e91-b187-a7b3a7eef893" containerID="8cdced547d62a4809f127deccb67c0e8217f3344538e996a003487177419d8cf" exitCode=0 Nov 25 10:05:20 crc kubenswrapper[4687]: I1125 10:05:20.869786 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rw8df/crc-debug-xd4sv" event={"ID":"73d7e557-d965-4e91-b187-a7b3a7eef893","Type":"ContainerDied","Data":"8cdced547d62a4809f127deccb67c0e8217f3344538e996a003487177419d8cf"} Nov 25 10:05:20 crc kubenswrapper[4687]: I1125 10:05:20.870096 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rw8df/crc-debug-xd4sv" event={"ID":"73d7e557-d965-4e91-b187-a7b3a7eef893","Type":"ContainerStarted","Data":"c462e5ae220e29917f1013de6566f00abedf6ec3ebe8f2ee38142d6c1a587eb4"} Nov 25 10:05:21 crc kubenswrapper[4687]: I1125 10:05:21.412621 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rw8df/crc-debug-xd4sv"] Nov 25 10:05:21 crc kubenswrapper[4687]: I1125 10:05:21.421752 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rw8df/crc-debug-xd4sv"] Nov 25 10:05:22 crc kubenswrapper[4687]: I1125 10:05:22.003861 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rw8df/crc-debug-xd4sv" Nov 25 10:05:22 crc kubenswrapper[4687]: I1125 10:05:22.103359 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/73d7e557-d965-4e91-b187-a7b3a7eef893-host\") pod \"73d7e557-d965-4e91-b187-a7b3a7eef893\" (UID: \"73d7e557-d965-4e91-b187-a7b3a7eef893\") " Nov 25 10:05:22 crc kubenswrapper[4687]: I1125 10:05:22.103486 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/73d7e557-d965-4e91-b187-a7b3a7eef893-host" (OuterVolumeSpecName: "host") pod "73d7e557-d965-4e91-b187-a7b3a7eef893" (UID: "73d7e557-d965-4e91-b187-a7b3a7eef893"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:05:22 crc kubenswrapper[4687]: I1125 10:05:22.103624 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bgrl6\" (UniqueName: \"kubernetes.io/projected/73d7e557-d965-4e91-b187-a7b3a7eef893-kube-api-access-bgrl6\") pod \"73d7e557-d965-4e91-b187-a7b3a7eef893\" (UID: \"73d7e557-d965-4e91-b187-a7b3a7eef893\") " Nov 25 10:05:22 crc kubenswrapper[4687]: I1125 10:05:22.104175 4687 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/73d7e557-d965-4e91-b187-a7b3a7eef893-host\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:22 crc kubenswrapper[4687]: I1125 10:05:22.110934 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73d7e557-d965-4e91-b187-a7b3a7eef893-kube-api-access-bgrl6" (OuterVolumeSpecName: "kube-api-access-bgrl6") pod "73d7e557-d965-4e91-b187-a7b3a7eef893" (UID: "73d7e557-d965-4e91-b187-a7b3a7eef893"). InnerVolumeSpecName "kube-api-access-bgrl6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:22 crc kubenswrapper[4687]: I1125 10:05:22.206381 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bgrl6\" (UniqueName: \"kubernetes.io/projected/73d7e557-d965-4e91-b187-a7b3a7eef893-kube-api-access-bgrl6\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:22 crc kubenswrapper[4687]: I1125 10:05:22.593828 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rw8df/crc-debug-s9npv"] Nov 25 10:05:22 crc kubenswrapper[4687]: E1125 10:05:22.594358 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73d7e557-d965-4e91-b187-a7b3a7eef893" containerName="container-00" Nov 25 10:05:22 crc kubenswrapper[4687]: I1125 10:05:22.594376 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="73d7e557-d965-4e91-b187-a7b3a7eef893" containerName="container-00" Nov 25 10:05:22 crc kubenswrapper[4687]: I1125 10:05:22.594630 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="73d7e557-d965-4e91-b187-a7b3a7eef893" containerName="container-00" Nov 25 10:05:22 crc kubenswrapper[4687]: I1125 10:05:22.595376 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rw8df/crc-debug-s9npv" Nov 25 10:05:22 crc kubenswrapper[4687]: I1125 10:05:22.731060 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a2c124f3-471d-4612-ac7b-2fb52eace90d-host\") pod \"crc-debug-s9npv\" (UID: \"a2c124f3-471d-4612-ac7b-2fb52eace90d\") " pod="openshift-must-gather-rw8df/crc-debug-s9npv" Nov 25 10:05:22 crc kubenswrapper[4687]: I1125 10:05:22.731142 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hqdqd\" (UniqueName: \"kubernetes.io/projected/a2c124f3-471d-4612-ac7b-2fb52eace90d-kube-api-access-hqdqd\") pod \"crc-debug-s9npv\" (UID: \"a2c124f3-471d-4612-ac7b-2fb52eace90d\") " pod="openshift-must-gather-rw8df/crc-debug-s9npv" Nov 25 10:05:22 crc kubenswrapper[4687]: I1125 10:05:22.834520 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a2c124f3-471d-4612-ac7b-2fb52eace90d-host\") pod \"crc-debug-s9npv\" (UID: \"a2c124f3-471d-4612-ac7b-2fb52eace90d\") " pod="openshift-must-gather-rw8df/crc-debug-s9npv" Nov 25 10:05:22 crc kubenswrapper[4687]: I1125 10:05:22.834586 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hqdqd\" (UniqueName: \"kubernetes.io/projected/a2c124f3-471d-4612-ac7b-2fb52eace90d-kube-api-access-hqdqd\") pod \"crc-debug-s9npv\" (UID: \"a2c124f3-471d-4612-ac7b-2fb52eace90d\") " pod="openshift-must-gather-rw8df/crc-debug-s9npv" Nov 25 10:05:22 crc kubenswrapper[4687]: I1125 10:05:22.835562 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a2c124f3-471d-4612-ac7b-2fb52eace90d-host\") pod \"crc-debug-s9npv\" (UID: \"a2c124f3-471d-4612-ac7b-2fb52eace90d\") " pod="openshift-must-gather-rw8df/crc-debug-s9npv" Nov 25 10:05:22 crc kubenswrapper[4687]: I1125 10:05:22.859015 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqdqd\" (UniqueName: \"kubernetes.io/projected/a2c124f3-471d-4612-ac7b-2fb52eace90d-kube-api-access-hqdqd\") pod \"crc-debug-s9npv\" (UID: \"a2c124f3-471d-4612-ac7b-2fb52eace90d\") " 
pod="openshift-must-gather-rw8df/crc-debug-s9npv" Nov 25 10:05:22 crc kubenswrapper[4687]: I1125 10:05:22.891571 4687 scope.go:117] "RemoveContainer" containerID="8cdced547d62a4809f127deccb67c0e8217f3344538e996a003487177419d8cf" Nov 25 10:05:22 crc kubenswrapper[4687]: I1125 10:05:22.891746 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rw8df/crc-debug-xd4sv" Nov 25 10:05:22 crc kubenswrapper[4687]: I1125 10:05:22.914384 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rw8df/crc-debug-s9npv" Nov 25 10:05:22 crc kubenswrapper[4687]: W1125 10:05:22.972109 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2c124f3_471d_4612_ac7b_2fb52eace90d.slice/crio-49db8e37d1693980695729855944564894af274e168a9b18048cc04ee59449cd WatchSource:0}: Error finding container 49db8e37d1693980695729855944564894af274e168a9b18048cc04ee59449cd: Status 404 returned error can't find the container with id 49db8e37d1693980695729855944564894af274e168a9b18048cc04ee59449cd Nov 25 10:05:23 crc kubenswrapper[4687]: I1125 10:05:23.747445 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="73d7e557-d965-4e91-b187-a7b3a7eef893" path="/var/lib/kubelet/pods/73d7e557-d965-4e91-b187-a7b3a7eef893/volumes" Nov 25 10:05:23 crc kubenswrapper[4687]: I1125 10:05:23.904885 4687 generic.go:334] "Generic (PLEG): container finished" podID="a2c124f3-471d-4612-ac7b-2fb52eace90d" containerID="fd3fb06e4721d7404a92eb6f7f81934faf84cc5040e13df4aceaab8fd80b6dd6" exitCode=0 Nov 25 10:05:23 crc kubenswrapper[4687]: I1125 10:05:23.904928 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rw8df/crc-debug-s9npv" event={"ID":"a2c124f3-471d-4612-ac7b-2fb52eace90d","Type":"ContainerDied","Data":"fd3fb06e4721d7404a92eb6f7f81934faf84cc5040e13df4aceaab8fd80b6dd6"} Nov 25 10:05:23 crc kubenswrapper[4687]: I1125 10:05:23.904955 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rw8df/crc-debug-s9npv" event={"ID":"a2c124f3-471d-4612-ac7b-2fb52eace90d","Type":"ContainerStarted","Data":"49db8e37d1693980695729855944564894af274e168a9b18048cc04ee59449cd"} Nov 25 10:05:23 crc kubenswrapper[4687]: I1125 10:05:23.951598 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rw8df/crc-debug-s9npv"] Nov 25 10:05:23 crc kubenswrapper[4687]: I1125 10:05:23.962886 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rw8df/crc-debug-s9npv"] Nov 25 10:05:25 crc kubenswrapper[4687]: I1125 10:05:25.030318 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rw8df/crc-debug-s9npv" Nov 25 10:05:25 crc kubenswrapper[4687]: I1125 10:05:25.181423 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a2c124f3-471d-4612-ac7b-2fb52eace90d-host\") pod \"a2c124f3-471d-4612-ac7b-2fb52eace90d\" (UID: \"a2c124f3-471d-4612-ac7b-2fb52eace90d\") " Nov 25 10:05:25 crc kubenswrapper[4687]: I1125 10:05:25.181604 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a2c124f3-471d-4612-ac7b-2fb52eace90d-host" (OuterVolumeSpecName: "host") pod "a2c124f3-471d-4612-ac7b-2fb52eace90d" (UID: "a2c124f3-471d-4612-ac7b-2fb52eace90d"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:05:25 crc kubenswrapper[4687]: I1125 10:05:25.181670 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hqdqd\" (UniqueName: \"kubernetes.io/projected/a2c124f3-471d-4612-ac7b-2fb52eace90d-kube-api-access-hqdqd\") pod \"a2c124f3-471d-4612-ac7b-2fb52eace90d\" (UID: \"a2c124f3-471d-4612-ac7b-2fb52eace90d\") " Nov 25 10:05:25 crc kubenswrapper[4687]: I1125 10:05:25.182450 4687 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a2c124f3-471d-4612-ac7b-2fb52eace90d-host\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:25 crc kubenswrapper[4687]: I1125 10:05:25.208786 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2c124f3-471d-4612-ac7b-2fb52eace90d-kube-api-access-hqdqd" (OuterVolumeSpecName: "kube-api-access-hqdqd") pod "a2c124f3-471d-4612-ac7b-2fb52eace90d" (UID: "a2c124f3-471d-4612-ac7b-2fb52eace90d"). InnerVolumeSpecName "kube-api-access-hqdqd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:25 crc kubenswrapper[4687]: I1125 10:05:25.284569 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hqdqd\" (UniqueName: \"kubernetes.io/projected/a2c124f3-471d-4612-ac7b-2fb52eace90d-kube-api-access-hqdqd\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:25 crc kubenswrapper[4687]: I1125 10:05:25.765215 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2c124f3-471d-4612-ac7b-2fb52eace90d" path="/var/lib/kubelet/pods/a2c124f3-471d-4612-ac7b-2fb52eace90d/volumes" Nov 25 10:05:25 crc kubenswrapper[4687]: I1125 10:05:25.924141 4687 scope.go:117] "RemoveContainer" containerID="fd3fb06e4721d7404a92eb6f7f81934faf84cc5040e13df4aceaab8fd80b6dd6" Nov 25 10:05:25 crc kubenswrapper[4687]: I1125 10:05:25.924172 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rw8df/crc-debug-s9npv" Nov 25 10:05:40 crc kubenswrapper[4687]: I1125 10:05:40.327019 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6fd7f79f6b-n9xqk_972adc4c-cd8b-4ead-a7da-1f21cf692157/barbican-api/0.log" Nov 25 10:05:40 crc kubenswrapper[4687]: I1125 10:05:40.455159 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6fd7f79f6b-n9xqk_972adc4c-cd8b-4ead-a7da-1f21cf692157/barbican-api-log/0.log" Nov 25 10:05:40 crc kubenswrapper[4687]: I1125 10:05:40.534017 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-674b9465d-cz7jd_50819727-088c-4d7f-bff7-c95d3d2ece69/barbican-keystone-listener/0.log" Nov 25 10:05:40 crc kubenswrapper[4687]: I1125 10:05:40.564835 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-674b9465d-cz7jd_50819727-088c-4d7f-bff7-c95d3d2ece69/barbican-keystone-listener-log/0.log" Nov 25 10:05:40 crc kubenswrapper[4687]: I1125 10:05:40.759358 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-c8494b867-8fmmw_90df3ed8-0bc0-4a26-940d-13dd51fd575a/barbican-worker/0.log" Nov 25 10:05:40 crc kubenswrapper[4687]: I1125 10:05:40.787213 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-c8494b867-8fmmw_90df3ed8-0bc0-4a26-940d-13dd51fd575a/barbican-worker-log/0.log" Nov 25 10:05:40 crc kubenswrapper[4687]: I1125 10:05:40.931240 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg_34a409ae-58d8-4746-83e8-f93d0e449216/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:05:40 crc kubenswrapper[4687]: I1125 10:05:40.974617 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_14bf5e9a-1354-4b0d-a475-2d3de20a07fd/ceilometer-central-agent/0.log" Nov 25 10:05:41 crc kubenswrapper[4687]: I1125 10:05:41.043046 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_14bf5e9a-1354-4b0d-a475-2d3de20a07fd/ceilometer-notification-agent/0.log" Nov 25 10:05:41 crc kubenswrapper[4687]: I1125 10:05:41.156814 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_14bf5e9a-1354-4b0d-a475-2d3de20a07fd/proxy-httpd/0.log" Nov 25 10:05:41 crc kubenswrapper[4687]: I1125 10:05:41.198180 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_14bf5e9a-1354-4b0d-a475-2d3de20a07fd/sg-core/0.log" Nov 25 10:05:41 crc kubenswrapper[4687]: I1125 10:05:41.283674 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_bc9b51e3-3417-4e5d-86f6-2322c956f540/cinder-api/0.log" Nov 25 10:05:41 crc kubenswrapper[4687]: I1125 10:05:41.372748 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_bc9b51e3-3417-4e5d-86f6-2322c956f540/cinder-api-log/0.log" Nov 25 10:05:41 crc kubenswrapper[4687]: I1125 10:05:41.492248 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_48978802-f2a2-41ad-bc63-e71b66b0747f/cinder-scheduler/0.log" Nov 25 10:05:41 crc kubenswrapper[4687]: I1125 10:05:41.528476 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_48978802-f2a2-41ad-bc63-e71b66b0747f/probe/0.log" Nov 25 10:05:41 crc kubenswrapper[4687]: I1125 10:05:41.639618 4687 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr_d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:05:41 crc kubenswrapper[4687]: I1125 10:05:41.775059 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr_906f3fa0-c7e9-40dc-a876-0e0c9cdbc272/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:05:41 crc kubenswrapper[4687]: I1125 10:05:41.851233 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-bjn64_68d33f41-95f3-41f3-847a-76b368d367cd/init/0.log" Nov 25 10:05:42 crc kubenswrapper[4687]: I1125 10:05:42.030556 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-bjn64_68d33f41-95f3-41f3-847a-76b368d367cd/init/0.log" Nov 25 10:05:42 crc kubenswrapper[4687]: I1125 10:05:42.071291 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-bjn64_68d33f41-95f3-41f3-847a-76b368d367cd/dnsmasq-dns/0.log" Nov 25 10:05:42 crc kubenswrapper[4687]: I1125 10:05:42.099840 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-z6m94_c2600c05-1335-45a5-b7d3-4bfd661d7884/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:05:42 crc kubenswrapper[4687]: I1125 10:05:42.321413 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_603378ac-d3a5-43ec-bd0f-b4237683f553/glance-httpd/0.log" Nov 25 10:05:42 crc kubenswrapper[4687]: I1125 10:05:42.324538 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_603378ac-d3a5-43ec-bd0f-b4237683f553/glance-log/0.log" Nov 25 10:05:42 crc kubenswrapper[4687]: I1125 10:05:42.484912 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_255349d3-1260-430d-a74a-2fa4027d92b5/glance-httpd/0.log" Nov 25 10:05:42 crc kubenswrapper[4687]: I1125 10:05:42.509975 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_255349d3-1260-430d-a74a-2fa4027d92b5/glance-log/0.log" Nov 25 10:05:42 crc kubenswrapper[4687]: I1125 10:05:42.749120 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-69b7bcc78d-r6t7q_e4c7abdb-6d41-42c3-a228-27ebd825e7b5/horizon/0.log" Nov 25 10:05:43 crc kubenswrapper[4687]: I1125 10:05:43.079574 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-69b7bcc78d-r6t7q_e4c7abdb-6d41-42c3-a228-27ebd825e7b5/horizon-log/0.log" Nov 25 10:05:43 crc kubenswrapper[4687]: I1125 10:05:43.105376 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd_04071c43-9814-4c88-bd7e-f3b1c83f9dfc/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:05:43 crc kubenswrapper[4687]: I1125 10:05:43.201158 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-jh85t_29a959b9-db17-40b5-8c9b-f54bc3548ca2/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:05:43 crc kubenswrapper[4687]: I1125 10:05:43.457344 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29401081-f9gz7_0fce13d2-b073-4d13-ae67-6a4a079ae3f1/keystone-cron/0.log" Nov 25 
10:05:43 crc kubenswrapper[4687]: I1125 10:05:43.493057 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-6cd548ffc8-p78fk_f8e2361f-7cd6-4055-8e0d-a53eda846c23/keystone-api/0.log" Nov 25 10:05:43 crc kubenswrapper[4687]: I1125 10:05:43.661940 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_20082470-4513-4042-8a66-3117b8a387f4/kube-state-metrics/0.log" Nov 25 10:05:43 crc kubenswrapper[4687]: I1125 10:05:43.747080 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn_7cf72d64-3a5f-42c4-a290-2244169a8a60/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:05:44 crc kubenswrapper[4687]: I1125 10:05:44.071170 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-fdc69b5cc-jz28l_e23cf6de-4d7f-40f1-aac9-a397b1c8bb36/neutron-api/0.log" Nov 25 10:05:44 crc kubenswrapper[4687]: I1125 10:05:44.179936 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-fdc69b5cc-jz28l_e23cf6de-4d7f-40f1-aac9-a397b1c8bb36/neutron-httpd/0.log" Nov 25 10:05:44 crc kubenswrapper[4687]: I1125 10:05:44.338051 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56_ec0b61ae-ccac-473b-ab43-e21daf1c348e/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:05:44 crc kubenswrapper[4687]: I1125 10:05:44.786565 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_8ef7fc72-708b-4994-9ced-44ec353121fc/nova-api-log/0.log" Nov 25 10:05:44 crc kubenswrapper[4687]: I1125 10:05:44.867950 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_e891aa7c-cb45-432d-9a15-1194e9700272/nova-cell0-conductor-conductor/0.log" Nov 25 10:05:44 crc kubenswrapper[4687]: I1125 10:05:44.997411 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_8ef7fc72-708b-4994-9ced-44ec353121fc/nova-api-api/0.log" Nov 25 10:05:45 crc kubenswrapper[4687]: I1125 10:05:45.095337 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_4f859978-9653-48fd-9c45-a2eb11561c0d/nova-cell1-conductor-conductor/0.log" Nov 25 10:05:45 crc kubenswrapper[4687]: I1125 10:05:45.192119 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_059b7fdf-9ca4-4f03-afa0-ee554a6aa858/nova-cell1-novncproxy-novncproxy/0.log" Nov 25 10:05:45 crc kubenswrapper[4687]: I1125 10:05:45.380776 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-lvpfx_74866639-8460-4684-afe9-2e19c59db722/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:05:45 crc kubenswrapper[4687]: I1125 10:05:45.538346 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_28054f13-f14d-47dd-a07f-2e56cd710565/nova-metadata-log/0.log" Nov 25 10:05:45 crc kubenswrapper[4687]: I1125 10:05:45.857668 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_9e4aadac-caf5-4702-98c7-648843339aa5/nova-scheduler-scheduler/0.log" Nov 25 10:05:46 crc kubenswrapper[4687]: I1125 10:05:46.095197 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_b1c0236e-917b-4c65-a9b7-6d3508c1f4a8/mysql-bootstrap/0.log" Nov 25 10:05:46 crc kubenswrapper[4687]: I1125 10:05:46.319569 4687 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_b1c0236e-917b-4c65-a9b7-6d3508c1f4a8/mysql-bootstrap/0.log" Nov 25 10:05:46 crc kubenswrapper[4687]: I1125 10:05:46.426804 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_b1c0236e-917b-4c65-a9b7-6d3508c1f4a8/galera/0.log" Nov 25 10:05:46 crc kubenswrapper[4687]: I1125 10:05:46.531408 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7/mysql-bootstrap/0.log" Nov 25 10:05:46 crc kubenswrapper[4687]: I1125 10:05:46.739459 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7/mysql-bootstrap/0.log" Nov 25 10:05:46 crc kubenswrapper[4687]: I1125 10:05:46.777689 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7/galera/0.log" Nov 25 10:05:46 crc kubenswrapper[4687]: I1125 10:05:46.964889 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_f5dea615-cb9c-48fc-a557-9d8fbac041ac/openstackclient/0.log" Nov 25 10:05:46 crc kubenswrapper[4687]: I1125 10:05:46.980295 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_28054f13-f14d-47dd-a07f-2e56cd710565/nova-metadata-metadata/0.log" Nov 25 10:05:47 crc kubenswrapper[4687]: I1125 10:05:47.054553 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-fdpzn_6894bad0-9f1e-4d44-89a3-b06c6b24495a/ovn-controller/0.log" Nov 25 10:05:47 crc kubenswrapper[4687]: I1125 10:05:47.206052 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-g5gzm_b9a1cd27-5b10-422d-9629-a5a6c0bc128a/openstack-network-exporter/0.log" Nov 25 10:05:47 crc kubenswrapper[4687]: I1125 10:05:47.368484 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hstvx_ec3fcef6-578a-4687-9af6-18d6de32f1e1/ovsdb-server-init/0.log" Nov 25 10:05:47 crc kubenswrapper[4687]: I1125 10:05:47.483726 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hstvx_ec3fcef6-578a-4687-9af6-18d6de32f1e1/ovs-vswitchd/0.log" Nov 25 10:05:47 crc kubenswrapper[4687]: I1125 10:05:47.524688 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hstvx_ec3fcef6-578a-4687-9af6-18d6de32f1e1/ovsdb-server-init/0.log" Nov 25 10:05:47 crc kubenswrapper[4687]: I1125 10:05:47.555493 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hstvx_ec3fcef6-578a-4687-9af6-18d6de32f1e1/ovsdb-server/0.log" Nov 25 10:05:47 crc kubenswrapper[4687]: I1125 10:05:47.813071 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-wwsqh_db45ca10-ced6-46c4-84e8-ac525cd596b4/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:05:47 crc kubenswrapper[4687]: I1125 10:05:47.835309 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_4e2efef0-3880-4d7a-bd93-59b596e470b8/openstack-network-exporter/0.log" Nov 25 10:05:47 crc kubenswrapper[4687]: I1125 10:05:47.953192 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_4e2efef0-3880-4d7a-bd93-59b596e470b8/ovn-northd/0.log" Nov 25 10:05:48 crc kubenswrapper[4687]: I1125 10:05:48.054614 4687 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_9d20bc24-507c-4712-8c05-c8d3cfd4e87f/openstack-network-exporter/0.log" Nov 25 10:05:48 crc kubenswrapper[4687]: I1125 10:05:48.088357 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_9d20bc24-507c-4712-8c05-c8d3cfd4e87f/ovsdbserver-nb/0.log" Nov 25 10:05:48 crc kubenswrapper[4687]: I1125 10:05:48.335175 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_ae74a803-c417-4ab8-8842-20a575b77dd3/openstack-network-exporter/0.log" Nov 25 10:05:48 crc kubenswrapper[4687]: I1125 10:05:48.367418 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_ae74a803-c417-4ab8-8842-20a575b77dd3/ovsdbserver-sb/0.log" Nov 25 10:05:48 crc kubenswrapper[4687]: I1125 10:05:48.580090 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5bf794b984-bbcp5_ca801008-2024-4b8d-a69b-2f468a78f1a1/placement-api/0.log" Nov 25 10:05:48 crc kubenswrapper[4687]: I1125 10:05:48.631671 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_0f0fb06f-00e1-471a-855b-88f34608ca01/setup-container/0.log" Nov 25 10:05:48 crc kubenswrapper[4687]: I1125 10:05:48.664517 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5bf794b984-bbcp5_ca801008-2024-4b8d-a69b-2f468a78f1a1/placement-log/0.log" Nov 25 10:05:48 crc kubenswrapper[4687]: I1125 10:05:48.876011 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_0f0fb06f-00e1-471a-855b-88f34608ca01/setup-container/0.log" Nov 25 10:05:48 crc kubenswrapper[4687]: I1125 10:05:48.954425 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_244d6f11-290e-4cbe-95b7-04b7555090a9/setup-container/0.log" Nov 25 10:05:48 crc kubenswrapper[4687]: I1125 10:05:48.958349 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_0f0fb06f-00e1-471a-855b-88f34608ca01/rabbitmq/0.log" Nov 25 10:05:49 crc kubenswrapper[4687]: I1125 10:05:49.109598 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_244d6f11-290e-4cbe-95b7-04b7555090a9/setup-container/0.log" Nov 25 10:05:49 crc kubenswrapper[4687]: I1125 10:05:49.207986 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_244d6f11-290e-4cbe-95b7-04b7555090a9/rabbitmq/0.log" Nov 25 10:05:49 crc kubenswrapper[4687]: I1125 10:05:49.240771 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68_9896502d-3c95-47e0-b75a-855221a19ebc/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:05:49 crc kubenswrapper[4687]: I1125 10:05:49.377765 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-xp2dr_0d29aa5d-d14b-4a11-9929-84c1770afb05/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:05:49 crc kubenswrapper[4687]: I1125 10:05:49.718952 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk_59cc9836-1eba-484b-9c23-78e3368be44c/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:05:49 crc kubenswrapper[4687]: I1125 10:05:49.837943 4687 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-hcd94_84e0ed5c-dd14-40ef-bc65-5066ae662f34/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:05:49 crc kubenswrapper[4687]: I1125 10:05:49.987668 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-c42m9_a2095b70-3311-4f0a-a052-5c8f686fd304/ssh-known-hosts-edpm-deployment/0.log" Nov 25 10:05:50 crc kubenswrapper[4687]: I1125 10:05:50.223275 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6cc97f684c-lcsst_e4a5addf-9956-45b0-b761-affcce71a048/proxy-server/0.log" Nov 25 10:05:50 crc kubenswrapper[4687]: I1125 10:05:50.249164 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6cc97f684c-lcsst_e4a5addf-9956-45b0-b761-affcce71a048/proxy-httpd/0.log" Nov 25 10:05:50 crc kubenswrapper[4687]: I1125 10:05:50.334597 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-7x797_aee1a7b3-633b-455a-903a-7b00ef90ea07/swift-ring-rebalance/0.log" Nov 25 10:05:50 crc kubenswrapper[4687]: I1125 10:05:50.457693 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/account-reaper/0.log" Nov 25 10:05:50 crc kubenswrapper[4687]: I1125 10:05:50.473072 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/account-auditor/0.log" Nov 25 10:05:50 crc kubenswrapper[4687]: I1125 10:05:50.592182 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/account-replicator/0.log" Nov 25 10:05:50 crc kubenswrapper[4687]: I1125 10:05:50.676369 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/account-server/0.log" Nov 25 10:05:50 crc kubenswrapper[4687]: I1125 10:05:50.690580 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/container-auditor/0.log" Nov 25 10:05:50 crc kubenswrapper[4687]: I1125 10:05:50.748207 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/container-replicator/0.log" Nov 25 10:05:50 crc kubenswrapper[4687]: I1125 10:05:50.851639 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/container-server/0.log" Nov 25 10:05:50 crc kubenswrapper[4687]: I1125 10:05:50.896240 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/object-auditor/0.log" Nov 25 10:05:50 crc kubenswrapper[4687]: I1125 10:05:50.960612 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/container-updater/0.log" Nov 25 10:05:51 crc kubenswrapper[4687]: I1125 10:05:51.037663 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/object-expirer/0.log" Nov 25 10:05:51 crc kubenswrapper[4687]: I1125 10:05:51.098382 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/object-replicator/0.log" Nov 25 10:05:51 crc kubenswrapper[4687]: I1125 10:05:51.151310 4687 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/object-server/0.log" Nov 25 10:05:51 crc kubenswrapper[4687]: I1125 10:05:51.250744 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/object-updater/0.log" Nov 25 10:05:51 crc kubenswrapper[4687]: I1125 10:05:51.297479 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/rsync/0.log" Nov 25 10:05:51 crc kubenswrapper[4687]: I1125 10:05:51.355607 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/swift-recon-cron/0.log" Nov 25 10:05:51 crc kubenswrapper[4687]: I1125 10:05:51.551156 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq_cc7503d0-7742-479f-94f2-d2fbffd48809/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:05:51 crc kubenswrapper[4687]: I1125 10:05:51.615646 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_f60c7882-f90a-4cfd-93a4-1cf51c29315a/tempest-tests-tempest-tests-runner/0.log" Nov 25 10:05:51 crc kubenswrapper[4687]: I1125 10:05:51.801262 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_b6d8db87-1023-4e20-99ca-6a755dc19fe3/test-operator-logs-container/0.log" Nov 25 10:05:51 crc kubenswrapper[4687]: I1125 10:05:51.836016 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-wnpct_ba70f059-0179-41d0-b0fe-2f0e0b4db2a7/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:05:59 crc kubenswrapper[4687]: I1125 10:05:59.743144 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_8b16fcc2-1dd1-47d5-979a-f50611173736/memcached/0.log" Nov 25 10:06:17 crc kubenswrapper[4687]: I1125 10:06:17.646056 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb_989d467f-e529-48b1-ac6b-f8509d9ae3f8/util/0.log" Nov 25 10:06:17 crc kubenswrapper[4687]: I1125 10:06:17.840951 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb_989d467f-e529-48b1-ac6b-f8509d9ae3f8/pull/0.log" Nov 25 10:06:17 crc kubenswrapper[4687]: I1125 10:06:17.863946 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb_989d467f-e529-48b1-ac6b-f8509d9ae3f8/util/0.log" Nov 25 10:06:17 crc kubenswrapper[4687]: I1125 10:06:17.903883 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb_989d467f-e529-48b1-ac6b-f8509d9ae3f8/pull/0.log" Nov 25 10:06:18 crc kubenswrapper[4687]: I1125 10:06:18.755085 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb_989d467f-e529-48b1-ac6b-f8509d9ae3f8/util/0.log" Nov 25 10:06:18 crc kubenswrapper[4687]: I1125 10:06:18.802057 4687 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb_989d467f-e529-48b1-ac6b-f8509d9ae3f8/pull/0.log" Nov 25 10:06:18 crc kubenswrapper[4687]: I1125 10:06:18.830856 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb_989d467f-e529-48b1-ac6b-f8509d9ae3f8/extract/0.log" Nov 25 10:06:19 crc kubenswrapper[4687]: I1125 10:06:19.035717 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-86dc4d89c8-4dn9x_a5f5d45b-b0ce-48f8-892e-02571e1f9f24/manager/0.log" Nov 25 10:06:19 crc kubenswrapper[4687]: I1125 10:06:19.046794 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-86dc4d89c8-4dn9x_a5f5d45b-b0ce-48f8-892e-02571e1f9f24/kube-rbac-proxy/0.log" Nov 25 10:06:19 crc kubenswrapper[4687]: I1125 10:06:19.059213 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-79856dc55c-ckt6f_26dc2622-a74f-405c-9bbb-291adb145908/kube-rbac-proxy/0.log" Nov 25 10:06:19 crc kubenswrapper[4687]: I1125 10:06:19.292764 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d695c9b56-jcrff_4d75764d-49d9-4482-98a9-728dd977f2bd/manager/0.log" Nov 25 10:06:19 crc kubenswrapper[4687]: I1125 10:06:19.313897 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-79856dc55c-ckt6f_26dc2622-a74f-405c-9bbb-291adb145908/manager/0.log" Nov 25 10:06:19 crc kubenswrapper[4687]: I1125 10:06:19.322857 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d695c9b56-jcrff_4d75764d-49d9-4482-98a9-728dd977f2bd/kube-rbac-proxy/0.log" Nov 25 10:06:19 crc kubenswrapper[4687]: I1125 10:06:19.460398 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-68b95954c9-wxt5h_ed7a2e30-6110-4b1c-864f-4856c4c0ec8a/kube-rbac-proxy/0.log" Nov 25 10:06:19 crc kubenswrapper[4687]: I1125 10:06:19.591169 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-68b95954c9-wxt5h_ed7a2e30-6110-4b1c-864f-4856c4c0ec8a/manager/0.log" Nov 25 10:06:19 crc kubenswrapper[4687]: I1125 10:06:19.625832 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-774b86978c-zwv7l_9c7f6da9-8178-4c3f-b565-9f6eca26c6c7/kube-rbac-proxy/0.log" Nov 25 10:06:19 crc kubenswrapper[4687]: I1125 10:06:19.695760 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-774b86978c-zwv7l_9c7f6da9-8178-4c3f-b565-9f6eca26c6c7/manager/0.log" Nov 25 10:06:19 crc kubenswrapper[4687]: I1125 10:06:19.808151 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c9694994-lsr97_956b1e07-e4b1-44cf-9990-ae928a3e11c7/kube-rbac-proxy/0.log" Nov 25 10:06:19 crc kubenswrapper[4687]: I1125 10:06:19.883437 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c9694994-lsr97_956b1e07-e4b1-44cf-9990-ae928a3e11c7/manager/0.log" Nov 25 10:06:19 crc kubenswrapper[4687]: I1125 10:06:19.949949 4687 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-d5cc86f4b-w2vc9_0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14/kube-rbac-proxy/0.log" Nov 25 10:06:20 crc kubenswrapper[4687]: I1125 10:06:20.124683 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5bfcdc958c-7r5w5_7c015be6-1e7f-404b-9ea0-31cbec410081/kube-rbac-proxy/0.log" Nov 25 10:06:20 crc kubenswrapper[4687]: I1125 10:06:20.175968 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5bfcdc958c-7r5w5_7c015be6-1e7f-404b-9ea0-31cbec410081/manager/0.log" Nov 25 10:06:20 crc kubenswrapper[4687]: I1125 10:06:20.276865 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-d5cc86f4b-w2vc9_0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14/manager/0.log" Nov 25 10:06:20 crc kubenswrapper[4687]: I1125 10:06:20.354881 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-748dc6576f-khvhw_dc4b5a7c-5e58-42a7-b1ee-676268f99e21/kube-rbac-proxy/0.log" Nov 25 10:06:20 crc kubenswrapper[4687]: I1125 10:06:20.428141 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-748dc6576f-khvhw_dc4b5a7c-5e58-42a7-b1ee-676268f99e21/manager/0.log" Nov 25 10:06:20 crc kubenswrapper[4687]: I1125 10:06:20.512283 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58bb8d67cc-cf25t_469145fd-b998-4c0a-b356-508c4940f78b/kube-rbac-proxy/0.log" Nov 25 10:06:20 crc kubenswrapper[4687]: I1125 10:06:20.595436 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58bb8d67cc-cf25t_469145fd-b998-4c0a-b356-508c4940f78b/manager/0.log" Nov 25 10:06:20 crc kubenswrapper[4687]: I1125 10:06:20.690984 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c4fdb7-qfcjj_b07b54a0-d4b4-49e3-bd03-810eeefa6fa7/kube-rbac-proxy/0.log" Nov 25 10:06:20 crc kubenswrapper[4687]: I1125 10:06:20.810793 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c4fdb7-qfcjj_b07b54a0-d4b4-49e3-bd03-810eeefa6fa7/manager/0.log" Nov 25 10:06:20 crc kubenswrapper[4687]: I1125 10:06:20.866220 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7c57c8bbc4-qzdnw_555e5cf5-f2f8-46f2-ab17-8589c7391fc8/kube-rbac-proxy/0.log" Nov 25 10:06:20 crc kubenswrapper[4687]: I1125 10:06:20.927297 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7c57c8bbc4-qzdnw_555e5cf5-f2f8-46f2-ab17-8589c7391fc8/manager/0.log" Nov 25 10:06:21 crc kubenswrapper[4687]: I1125 10:06:21.044001 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-99k8n_62a14c39-245f-4c8d-84b5-b23d023d810f/kube-rbac-proxy/0.log" Nov 25 10:06:21 crc kubenswrapper[4687]: I1125 10:06:21.154485 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-99k8n_62a14c39-245f-4c8d-84b5-b23d023d810f/manager/0.log" Nov 25 10:06:21 crc kubenswrapper[4687]: I1125 
10:06:21.264185 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-fd75fd47d-z59ft_78edbfb4-5838-4c2d-a4e3-e1512bb55654/kube-rbac-proxy/0.log" Nov 25 10:06:21 crc kubenswrapper[4687]: I1125 10:06:21.273844 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-fd75fd47d-z59ft_78edbfb4-5838-4c2d-a4e3-e1512bb55654/manager/0.log" Nov 25 10:06:21 crc kubenswrapper[4687]: I1125 10:06:21.391202 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5_9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be/kube-rbac-proxy/0.log" Nov 25 10:06:21 crc kubenswrapper[4687]: I1125 10:06:21.469441 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5_9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be/manager/0.log" Nov 25 10:06:21 crc kubenswrapper[4687]: I1125 10:06:21.774183 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5888c99dff-lmgz9_35a98c5c-b3b4-4e95-821d-923a693b67e0/operator/0.log" Nov 25 10:06:21 crc kubenswrapper[4687]: I1125 10:06:21.775061 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-xg64b_fabb562b-35a6-4e1d-bdd5-5357491f9ad6/registry-server/0.log" Nov 25 10:06:22 crc kubenswrapper[4687]: I1125 10:06:22.017554 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-66cf5c67ff-c8plr_be89cce1-89d8-47da-b777-f7805762b230/kube-rbac-proxy/0.log" Nov 25 10:06:22 crc kubenswrapper[4687]: I1125 10:06:22.085925 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-66cf5c67ff-c8plr_be89cce1-89d8-47da-b777-f7805762b230/manager/0.log" Nov 25 10:06:22 crc kubenswrapper[4687]: I1125 10:06:22.094686 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5db546f9d9-qsrlz_99367bef-5882-4884-8fe5-9a3ff8edd1cb/kube-rbac-proxy/0.log" Nov 25 10:06:22 crc kubenswrapper[4687]: I1125 10:06:22.266375 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5db546f9d9-qsrlz_99367bef-5882-4884-8fe5-9a3ff8edd1cb/manager/0.log" Nov 25 10:06:22 crc kubenswrapper[4687]: I1125 10:06:22.408973 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-6lbv2_5a4bd509-a298-4fff-845b-262a41634134/operator/0.log" Nov 25 10:06:22 crc kubenswrapper[4687]: I1125 10:06:22.493308 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-6fdc4fcf86-pqd76_ac5487a2-ce65-4034-973b-b939494aef63/kube-rbac-proxy/0.log" Nov 25 10:06:22 crc kubenswrapper[4687]: I1125 10:06:22.604793 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-6fdc4fcf86-pqd76_ac5487a2-ce65-4034-973b-b939494aef63/manager/0.log" Nov 25 10:06:22 crc kubenswrapper[4687]: I1125 10:06:22.667784 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-d664976d5-hdtkm_72c3a2af-6e0e-4862-b638-2694a71f1e5a/manager/0.log" Nov 25 10:06:22 crc 
kubenswrapper[4687]: I1125 10:06:22.676473 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-567f98c9d-s9vtg_4a1d4849-2906-4fd5-b54e-7f2e567f05ef/kube-rbac-proxy/0.log" Nov 25 10:06:22 crc kubenswrapper[4687]: I1125 10:06:22.822536 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-567f98c9d-s9vtg_4a1d4849-2906-4fd5-b54e-7f2e567f05ef/manager/0.log" Nov 25 10:06:22 crc kubenswrapper[4687]: I1125 10:06:22.904068 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cb74df96-7jmgj_9fac1200-5b4d-4032-98aa-d293d13fdcc7/kube-rbac-proxy/0.log" Nov 25 10:06:22 crc kubenswrapper[4687]: I1125 10:06:22.919151 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cb74df96-7jmgj_9fac1200-5b4d-4032-98aa-d293d13fdcc7/manager/0.log" Nov 25 10:06:22 crc kubenswrapper[4687]: I1125 10:06:22.984272 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-864885998-ll9p9_c2ea5569-33b4-403d-9303-770ec432f4cc/kube-rbac-proxy/0.log" Nov 25 10:06:23 crc kubenswrapper[4687]: I1125 10:06:23.137457 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-864885998-ll9p9_c2ea5569-33b4-403d-9303-770ec432f4cc/manager/0.log" Nov 25 10:06:23 crc kubenswrapper[4687]: I1125 10:06:23.844845 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:06:23 crc kubenswrapper[4687]: I1125 10:06:23.844926 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:06:39 crc kubenswrapper[4687]: I1125 10:06:39.144642 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-j4vxg_74dce578-27e9-4dc2-ac45-9019de15d559/control-plane-machine-set-operator/0.log" Nov 25 10:06:39 crc kubenswrapper[4687]: I1125 10:06:39.246965 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-v9zjk_25aecd46-36d8-4ee9-bae5-4731e91b5e74/kube-rbac-proxy/0.log" Nov 25 10:06:39 crc kubenswrapper[4687]: I1125 10:06:39.292673 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-v9zjk_25aecd46-36d8-4ee9-bae5-4731e91b5e74/machine-api-operator/0.log" Nov 25 10:06:50 crc kubenswrapper[4687]: I1125 10:06:50.555155 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-tc7r6_0ff14130-8c48-4847-8fa6-1ba61b371244/cert-manager-controller/0.log" Nov 25 10:06:50 crc kubenswrapper[4687]: I1125 10:06:50.768932 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-gk2n9_643e6584-e9ad-4fe0-96a6-d1dda245fe76/cert-manager-cainjector/0.log" Nov 25 10:06:50 crc 
kubenswrapper[4687]: I1125 10:06:50.824630 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-f22x8_4506551b-78bf-4fe5-8b60-e9e34a53c8df/cert-manager-webhook/0.log" Nov 25 10:06:53 crc kubenswrapper[4687]: I1125 10:06:53.845042 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:06:53 crc kubenswrapper[4687]: I1125 10:06:53.845821 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:07:02 crc kubenswrapper[4687]: I1125 10:07:02.055910 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-7zd5p_5197825b-263a-49cc-abde-f5863cac4989/nmstate-console-plugin/0.log" Nov 25 10:07:02 crc kubenswrapper[4687]: I1125 10:07:02.218587 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-dj956_39575ce1-8fae-41c6-8603-a4d49c101e7d/nmstate-handler/0.log" Nov 25 10:07:02 crc kubenswrapper[4687]: I1125 10:07:02.309209 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-9n22k_5e374138-9c9c-41b4-a2d1-eab48197d4bb/kube-rbac-proxy/0.log" Nov 25 10:07:02 crc kubenswrapper[4687]: I1125 10:07:02.332623 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-9n22k_5e374138-9c9c-41b4-a2d1-eab48197d4bb/nmstate-metrics/0.log" Nov 25 10:07:02 crc kubenswrapper[4687]: I1125 10:07:02.467734 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-vgd6d_4e8ba187-a91b-4312-bf06-eb0c5f0e5bd9/nmstate-operator/0.log" Nov 25 10:07:02 crc kubenswrapper[4687]: I1125 10:07:02.557161 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-p89k9_e6f4fd7b-09a3-44f9-9a7d-aaf6f7625989/nmstate-webhook/0.log" Nov 25 10:07:16 crc kubenswrapper[4687]: I1125 10:07:16.123566 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-6jtwr_eb69d750-834f-4728-8a20-f37dc1195e86/kube-rbac-proxy/0.log" Nov 25 10:07:16 crc kubenswrapper[4687]: I1125 10:07:16.281258 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-6jtwr_eb69d750-834f-4728-8a20-f37dc1195e86/controller/0.log" Nov 25 10:07:16 crc kubenswrapper[4687]: I1125 10:07:16.283474 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-frr-files/0.log" Nov 25 10:07:16 crc kubenswrapper[4687]: I1125 10:07:16.513312 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-metrics/0.log" Nov 25 10:07:16 crc kubenswrapper[4687]: I1125 10:07:16.518606 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-frr-files/0.log" Nov 25 10:07:16 crc kubenswrapper[4687]: I1125 
10:07:16.548566 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-reloader/0.log" Nov 25 10:07:16 crc kubenswrapper[4687]: I1125 10:07:16.605271 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-reloader/0.log" Nov 25 10:07:16 crc kubenswrapper[4687]: I1125 10:07:16.759206 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-frr-files/0.log" Nov 25 10:07:16 crc kubenswrapper[4687]: I1125 10:07:16.799088 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-metrics/0.log" Nov 25 10:07:16 crc kubenswrapper[4687]: I1125 10:07:16.804862 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-reloader/0.log" Nov 25 10:07:16 crc kubenswrapper[4687]: I1125 10:07:16.804928 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-metrics/0.log" Nov 25 10:07:16 crc kubenswrapper[4687]: I1125 10:07:16.960184 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-frr-files/0.log" Nov 25 10:07:17 crc kubenswrapper[4687]: I1125 10:07:17.002111 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-metrics/0.log" Nov 25 10:07:17 crc kubenswrapper[4687]: I1125 10:07:17.005496 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/controller/0.log" Nov 25 10:07:17 crc kubenswrapper[4687]: I1125 10:07:17.008272 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-reloader/0.log" Nov 25 10:07:17 crc kubenswrapper[4687]: I1125 10:07:17.179287 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/kube-rbac-proxy/0.log" Nov 25 10:07:17 crc kubenswrapper[4687]: I1125 10:07:17.188131 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/frr-metrics/0.log" Nov 25 10:07:17 crc kubenswrapper[4687]: I1125 10:07:17.207922 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/kube-rbac-proxy-frr/0.log" Nov 25 10:07:17 crc kubenswrapper[4687]: I1125 10:07:17.341975 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/reloader/0.log" Nov 25 10:07:17 crc kubenswrapper[4687]: I1125 10:07:17.527148 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-ddrgp_b9abb225-82ca-44ea-a30c-ec214deb3316/frr-k8s-webhook-server/0.log" Nov 25 10:07:17 crc kubenswrapper[4687]: I1125 10:07:17.706443 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-558db5dd86-fc58n_66122fe0-e231-48e6-8051-a04d330d8f17/manager/0.log" Nov 25 10:07:17 crc kubenswrapper[4687]: I1125 10:07:17.864406 4687 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_metallb-operator-webhook-server-84db77dcc8-bjgl5_8ed9d933-29d2-4e13-bb9b-377cdc8cf10a/webhook-server/0.log" Nov 25 10:07:18 crc kubenswrapper[4687]: I1125 10:07:18.035299 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-q7pt2_a2ef7e49-e737-462f-8ff8-b045611d5baf/kube-rbac-proxy/0.log" Nov 25 10:07:18 crc kubenswrapper[4687]: I1125 10:07:18.591194 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-q7pt2_a2ef7e49-e737-462f-8ff8-b045611d5baf/speaker/0.log" Nov 25 10:07:18 crc kubenswrapper[4687]: I1125 10:07:18.857872 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/frr/0.log" Nov 25 10:07:23 crc kubenswrapper[4687]: I1125 10:07:23.845151 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:07:23 crc kubenswrapper[4687]: I1125 10:07:23.845867 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:07:23 crc kubenswrapper[4687]: I1125 10:07:23.845928 4687 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 10:07:23 crc kubenswrapper[4687]: I1125 10:07:23.846732 4687 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9"} pod="openshift-machine-config-operator/machine-config-daemon-vcqct" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:07:23 crc kubenswrapper[4687]: I1125 10:07:23.846795 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" containerID="cri-o://0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" gracePeriod=600 Nov 25 10:07:23 crc kubenswrapper[4687]: E1125 10:07:23.964819 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:07:24 crc kubenswrapper[4687]: I1125 10:07:24.067901 4687 generic.go:334] "Generic (PLEG): container finished" podID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" exitCode=0 Nov 25 10:07:24 crc kubenswrapper[4687]: I1125 10:07:24.067949 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" 
event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerDied","Data":"0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9"} Nov 25 10:07:24 crc kubenswrapper[4687]: I1125 10:07:24.068016 4687 scope.go:117] "RemoveContainer" containerID="664d21be4e75ad4b02c7ff82884f9c15b596c7e753f3267df4c71202095aa0a7" Nov 25 10:07:24 crc kubenswrapper[4687]: I1125 10:07:24.068955 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:07:24 crc kubenswrapper[4687]: E1125 10:07:24.069224 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:07:30 crc kubenswrapper[4687]: I1125 10:07:30.206879 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7_94d372f4-1e99-47c6-89f3-d56aaf08cde8/util/0.log" Nov 25 10:07:30 crc kubenswrapper[4687]: I1125 10:07:30.481885 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7_94d372f4-1e99-47c6-89f3-d56aaf08cde8/util/0.log" Nov 25 10:07:30 crc kubenswrapper[4687]: I1125 10:07:30.499909 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7_94d372f4-1e99-47c6-89f3-d56aaf08cde8/pull/0.log" Nov 25 10:07:30 crc kubenswrapper[4687]: I1125 10:07:30.499975 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7_94d372f4-1e99-47c6-89f3-d56aaf08cde8/pull/0.log" Nov 25 10:07:30 crc kubenswrapper[4687]: I1125 10:07:30.589265 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7_94d372f4-1e99-47c6-89f3-d56aaf08cde8/util/0.log" Nov 25 10:07:30 crc kubenswrapper[4687]: I1125 10:07:30.637493 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7_94d372f4-1e99-47c6-89f3-d56aaf08cde8/pull/0.log" Nov 25 10:07:30 crc kubenswrapper[4687]: I1125 10:07:30.691637 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7_94d372f4-1e99-47c6-89f3-d56aaf08cde8/extract/0.log" Nov 25 10:07:30 crc kubenswrapper[4687]: I1125 10:07:30.779120 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qgl5s_c463bf2b-be16-4478-b58b-50c681a58749/extract-utilities/0.log" Nov 25 10:07:30 crc kubenswrapper[4687]: I1125 10:07:30.914926 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qgl5s_c463bf2b-be16-4478-b58b-50c681a58749/extract-utilities/0.log" Nov 25 10:07:30 crc kubenswrapper[4687]: I1125 10:07:30.929559 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qgl5s_c463bf2b-be16-4478-b58b-50c681a58749/extract-content/0.log" Nov 25 
10:07:30 crc kubenswrapper[4687]: I1125 10:07:30.962179 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qgl5s_c463bf2b-be16-4478-b58b-50c681a58749/extract-content/0.log" Nov 25 10:07:31 crc kubenswrapper[4687]: I1125 10:07:31.165516 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qgl5s_c463bf2b-be16-4478-b58b-50c681a58749/extract-content/0.log" Nov 25 10:07:31 crc kubenswrapper[4687]: I1125 10:07:31.213369 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qgl5s_c463bf2b-be16-4478-b58b-50c681a58749/extract-utilities/0.log" Nov 25 10:07:31 crc kubenswrapper[4687]: I1125 10:07:31.393099 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-t6b8w_c4f80d13-afbf-4fd7-8af8-726aef33138a/extract-utilities/0.log" Nov 25 10:07:31 crc kubenswrapper[4687]: I1125 10:07:31.573091 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qgl5s_c463bf2b-be16-4478-b58b-50c681a58749/registry-server/0.log" Nov 25 10:07:31 crc kubenswrapper[4687]: I1125 10:07:31.633573 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-t6b8w_c4f80d13-afbf-4fd7-8af8-726aef33138a/extract-content/0.log" Nov 25 10:07:31 crc kubenswrapper[4687]: I1125 10:07:31.659174 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-t6b8w_c4f80d13-afbf-4fd7-8af8-726aef33138a/extract-utilities/0.log" Nov 25 10:07:31 crc kubenswrapper[4687]: I1125 10:07:31.680126 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-t6b8w_c4f80d13-afbf-4fd7-8af8-726aef33138a/extract-content/0.log" Nov 25 10:07:31 crc kubenswrapper[4687]: I1125 10:07:31.801890 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-t6b8w_c4f80d13-afbf-4fd7-8af8-726aef33138a/extract-content/0.log" Nov 25 10:07:31 crc kubenswrapper[4687]: I1125 10:07:31.816016 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-t6b8w_c4f80d13-afbf-4fd7-8af8-726aef33138a/extract-utilities/0.log" Nov 25 10:07:32 crc kubenswrapper[4687]: I1125 10:07:32.053030 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl_3775e1e0-7599-47fa-b6af-872dff20eb0a/util/0.log" Nov 25 10:07:32 crc kubenswrapper[4687]: I1125 10:07:32.209217 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl_3775e1e0-7599-47fa-b6af-872dff20eb0a/util/0.log" Nov 25 10:07:32 crc kubenswrapper[4687]: I1125 10:07:32.226312 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl_3775e1e0-7599-47fa-b6af-872dff20eb0a/pull/0.log" Nov 25 10:07:32 crc kubenswrapper[4687]: I1125 10:07:32.282658 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl_3775e1e0-7599-47fa-b6af-872dff20eb0a/pull/0.log" Nov 25 10:07:32 crc kubenswrapper[4687]: I1125 10:07:32.512805 4687 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl_3775e1e0-7599-47fa-b6af-872dff20eb0a/util/0.log" Nov 25 10:07:32 crc kubenswrapper[4687]: I1125 10:07:32.548950 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl_3775e1e0-7599-47fa-b6af-872dff20eb0a/pull/0.log" Nov 25 10:07:32 crc kubenswrapper[4687]: I1125 10:07:32.582093 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl_3775e1e0-7599-47fa-b6af-872dff20eb0a/extract/0.log" Nov 25 10:07:32 crc kubenswrapper[4687]: I1125 10:07:32.583101 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-t6b8w_c4f80d13-afbf-4fd7-8af8-726aef33138a/registry-server/0.log" Nov 25 10:07:32 crc kubenswrapper[4687]: I1125 10:07:32.772085 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rf9fh_92741ed4-1138-4ac2-ad4b-0c558b1b574d/extract-utilities/0.log" Nov 25 10:07:32 crc kubenswrapper[4687]: I1125 10:07:32.783375 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-sdgfq_a26f1e8c-3181-4dbf-b2b6-13772b1d66d6/marketplace-operator/0.log" Nov 25 10:07:32 crc kubenswrapper[4687]: I1125 10:07:32.982196 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rf9fh_92741ed4-1138-4ac2-ad4b-0c558b1b574d/extract-content/0.log" Nov 25 10:07:32 crc kubenswrapper[4687]: I1125 10:07:32.986200 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rf9fh_92741ed4-1138-4ac2-ad4b-0c558b1b574d/extract-content/0.log" Nov 25 10:07:32 crc kubenswrapper[4687]: I1125 10:07:32.990981 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rf9fh_92741ed4-1138-4ac2-ad4b-0c558b1b574d/extract-utilities/0.log" Nov 25 10:07:33 crc kubenswrapper[4687]: I1125 10:07:33.214315 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rf9fh_92741ed4-1138-4ac2-ad4b-0c558b1b574d/extract-utilities/0.log" Nov 25 10:07:33 crc kubenswrapper[4687]: I1125 10:07:33.226844 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rf9fh_92741ed4-1138-4ac2-ad4b-0c558b1b574d/extract-content/0.log" Nov 25 10:07:33 crc kubenswrapper[4687]: I1125 10:07:33.339876 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rf9fh_92741ed4-1138-4ac2-ad4b-0c558b1b574d/registry-server/0.log" Nov 25 10:07:33 crc kubenswrapper[4687]: I1125 10:07:33.432106 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ml5cs_d587d63c-f76c-47e0-8e59-ff79ca1d8390/extract-utilities/0.log" Nov 25 10:07:33 crc kubenswrapper[4687]: I1125 10:07:33.521680 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ml5cs_d587d63c-f76c-47e0-8e59-ff79ca1d8390/extract-content/0.log" Nov 25 10:07:33 crc kubenswrapper[4687]: I1125 10:07:33.575073 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ml5cs_d587d63c-f76c-47e0-8e59-ff79ca1d8390/extract-content/0.log" Nov 25 10:07:33 crc kubenswrapper[4687]: 
I1125 10:07:33.575879 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ml5cs_d587d63c-f76c-47e0-8e59-ff79ca1d8390/extract-utilities/0.log" Nov 25 10:07:33 crc kubenswrapper[4687]: I1125 10:07:33.824452 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ml5cs_d587d63c-f76c-47e0-8e59-ff79ca1d8390/extract-content/0.log" Nov 25 10:07:33 crc kubenswrapper[4687]: I1125 10:07:33.831992 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ml5cs_d587d63c-f76c-47e0-8e59-ff79ca1d8390/extract-utilities/0.log" Nov 25 10:07:34 crc kubenswrapper[4687]: I1125 10:07:34.366083 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ml5cs_d587d63c-f76c-47e0-8e59-ff79ca1d8390/registry-server/0.log" Nov 25 10:07:35 crc kubenswrapper[4687]: I1125 10:07:35.742310 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:07:35 crc kubenswrapper[4687]: E1125 10:07:35.742729 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:07:50 crc kubenswrapper[4687]: I1125 10:07:50.735237 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:07:50 crc kubenswrapper[4687]: E1125 10:07:50.736275 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:07:52 crc kubenswrapper[4687]: I1125 10:07:52.964589 4687 scope.go:117] "RemoveContainer" containerID="9d8f9d12043203d52ce387e05b7182595439e9fccc4e5583e97d1b56a193b696" Nov 25 10:07:53 crc kubenswrapper[4687]: I1125 10:07:53.003842 4687 scope.go:117] "RemoveContainer" containerID="6950f73c2b69ba374a0989a5432ca49d9eb16b85ac0603b6d618096bf51e0067" Nov 25 10:07:53 crc kubenswrapper[4687]: I1125 10:07:53.094871 4687 scope.go:117] "RemoveContainer" containerID="4a35dea7d3ea60250acca5efb78783e377ebb67cd7e929f2aee1dcedf84f7abc" Nov 25 10:08:02 crc kubenswrapper[4687]: I1125 10:08:02.743350 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:08:02 crc kubenswrapper[4687]: E1125 10:08:02.744079 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:08:17 crc kubenswrapper[4687]: I1125 10:08:17.735971 4687 scope.go:117] "RemoveContainer" 
containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:08:17 crc kubenswrapper[4687]: E1125 10:08:17.737232 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:08:30 crc kubenswrapper[4687]: I1125 10:08:30.734272 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:08:30 crc kubenswrapper[4687]: E1125 10:08:30.735418 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:08:44 crc kubenswrapper[4687]: I1125 10:08:44.735099 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:08:44 crc kubenswrapper[4687]: E1125 10:08:44.736043 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:08:58 crc kubenswrapper[4687]: I1125 10:08:58.734657 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:08:58 crc kubenswrapper[4687]: E1125 10:08:58.735347 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:09:13 crc kubenswrapper[4687]: I1125 10:09:13.736452 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:09:13 crc kubenswrapper[4687]: E1125 10:09:13.740115 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:09:23 crc kubenswrapper[4687]: I1125 10:09:23.218286 4687 generic.go:334] "Generic (PLEG): container finished" podID="bc9d9172-75a3-4c5b-b0ec-255bc67b4161" containerID="ea72f561a01420d45a34ff94a7fc7a13aa0101a79a52e8d4106e0f6598b15162" exitCode=0 Nov 25 10:09:23 crc 
kubenswrapper[4687]: I1125 10:09:23.218379 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rw8df/must-gather-9gb4x" event={"ID":"bc9d9172-75a3-4c5b-b0ec-255bc67b4161","Type":"ContainerDied","Data":"ea72f561a01420d45a34ff94a7fc7a13aa0101a79a52e8d4106e0f6598b15162"} Nov 25 10:09:23 crc kubenswrapper[4687]: I1125 10:09:23.219622 4687 scope.go:117] "RemoveContainer" containerID="ea72f561a01420d45a34ff94a7fc7a13aa0101a79a52e8d4106e0f6598b15162" Nov 25 10:09:23 crc kubenswrapper[4687]: I1125 10:09:23.411447 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-rw8df_must-gather-9gb4x_bc9d9172-75a3-4c5b-b0ec-255bc67b4161/gather/0.log" Nov 25 10:09:24 crc kubenswrapper[4687]: I1125 10:09:24.735293 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:09:24 crc kubenswrapper[4687]: E1125 10:09:24.735745 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:09:31 crc kubenswrapper[4687]: I1125 10:09:31.637998 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rw8df/must-gather-9gb4x"] Nov 25 10:09:31 crc kubenswrapper[4687]: I1125 10:09:31.639026 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-rw8df/must-gather-9gb4x" podUID="bc9d9172-75a3-4c5b-b0ec-255bc67b4161" containerName="copy" containerID="cri-o://7b4307aea17d186b65618435bd26b7260031971493391e4d259be17f253013fc" gracePeriod=2 Nov 25 10:09:31 crc kubenswrapper[4687]: I1125 10:09:31.645948 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rw8df/must-gather-9gb4x"] Nov 25 10:09:32 crc kubenswrapper[4687]: I1125 10:09:32.309961 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-rw8df_must-gather-9gb4x_bc9d9172-75a3-4c5b-b0ec-255bc67b4161/copy/0.log" Nov 25 10:09:32 crc kubenswrapper[4687]: I1125 10:09:32.310813 4687 generic.go:334] "Generic (PLEG): container finished" podID="bc9d9172-75a3-4c5b-b0ec-255bc67b4161" containerID="7b4307aea17d186b65618435bd26b7260031971493391e4d259be17f253013fc" exitCode=143 Nov 25 10:09:32 crc kubenswrapper[4687]: I1125 10:09:32.310876 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bb731c597ee3c31385df3996cfba7ee2e24444dc0cb64f8b40112d8046b7a0ea" Nov 25 10:09:32 crc kubenswrapper[4687]: I1125 10:09:32.372977 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-rw8df_must-gather-9gb4x_bc9d9172-75a3-4c5b-b0ec-255bc67b4161/copy/0.log" Nov 25 10:09:32 crc kubenswrapper[4687]: I1125 10:09:32.373582 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rw8df/must-gather-9gb4x" Nov 25 10:09:32 crc kubenswrapper[4687]: I1125 10:09:32.527458 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/bc9d9172-75a3-4c5b-b0ec-255bc67b4161-must-gather-output\") pod \"bc9d9172-75a3-4c5b-b0ec-255bc67b4161\" (UID: \"bc9d9172-75a3-4c5b-b0ec-255bc67b4161\") " Nov 25 10:09:32 crc kubenswrapper[4687]: I1125 10:09:32.527623 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2pr4\" (UniqueName: \"kubernetes.io/projected/bc9d9172-75a3-4c5b-b0ec-255bc67b4161-kube-api-access-r2pr4\") pod \"bc9d9172-75a3-4c5b-b0ec-255bc67b4161\" (UID: \"bc9d9172-75a3-4c5b-b0ec-255bc67b4161\") " Nov 25 10:09:32 crc kubenswrapper[4687]: I1125 10:09:32.534425 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc9d9172-75a3-4c5b-b0ec-255bc67b4161-kube-api-access-r2pr4" (OuterVolumeSpecName: "kube-api-access-r2pr4") pod "bc9d9172-75a3-4c5b-b0ec-255bc67b4161" (UID: "bc9d9172-75a3-4c5b-b0ec-255bc67b4161"). InnerVolumeSpecName "kube-api-access-r2pr4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:09:32 crc kubenswrapper[4687]: I1125 10:09:32.630540 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2pr4\" (UniqueName: \"kubernetes.io/projected/bc9d9172-75a3-4c5b-b0ec-255bc67b4161-kube-api-access-r2pr4\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:32 crc kubenswrapper[4687]: I1125 10:09:32.689599 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc9d9172-75a3-4c5b-b0ec-255bc67b4161-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "bc9d9172-75a3-4c5b-b0ec-255bc67b4161" (UID: "bc9d9172-75a3-4c5b-b0ec-255bc67b4161"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:09:32 crc kubenswrapper[4687]: I1125 10:09:32.732310 4687 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/bc9d9172-75a3-4c5b-b0ec-255bc67b4161-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:33 crc kubenswrapper[4687]: I1125 10:09:33.318369 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rw8df/must-gather-9gb4x" Nov 25 10:09:33 crc kubenswrapper[4687]: I1125 10:09:33.748166 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc9d9172-75a3-4c5b-b0ec-255bc67b4161" path="/var/lib/kubelet/pods/bc9d9172-75a3-4c5b-b0ec-255bc67b4161/volumes" Nov 25 10:09:38 crc kubenswrapper[4687]: I1125 10:09:38.735039 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:09:38 crc kubenswrapper[4687]: E1125 10:09:38.735982 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:09:52 crc kubenswrapper[4687]: I1125 10:09:52.734949 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:09:52 crc kubenswrapper[4687]: E1125 10:09:52.736727 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:10:05 crc kubenswrapper[4687]: I1125 10:10:05.740962 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:10:05 crc kubenswrapper[4687]: E1125 10:10:05.741836 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:10:16 crc kubenswrapper[4687]: I1125 10:10:16.829394 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:10:16 crc kubenswrapper[4687]: E1125 10:10:16.830322 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:10:29 crc kubenswrapper[4687]: I1125 10:10:29.736304 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:10:29 crc kubenswrapper[4687]: E1125 10:10:29.737407 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:10:44 crc kubenswrapper[4687]: I1125 10:10:44.736740 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:10:44 crc kubenswrapper[4687]: E1125 10:10:44.737819 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:10:53 crc kubenswrapper[4687]: I1125 10:10:53.214453 4687 scope.go:117] "RemoveContainer" containerID="ea72f561a01420d45a34ff94a7fc7a13aa0101a79a52e8d4106e0f6598b15162" Nov 25 10:10:53 crc kubenswrapper[4687]: I1125 10:10:53.271389 4687 scope.go:117] "RemoveContainer" containerID="7b4307aea17d186b65618435bd26b7260031971493391e4d259be17f253013fc" Nov 25 10:10:59 crc kubenswrapper[4687]: I1125 10:10:59.735130 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:10:59 crc kubenswrapper[4687]: E1125 10:10:59.735953 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:11:14 crc kubenswrapper[4687]: I1125 10:11:14.734961 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:11:14 crc kubenswrapper[4687]: E1125 10:11:14.735714 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:11:29 crc kubenswrapper[4687]: I1125 10:11:29.736281 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:11:29 crc kubenswrapper[4687]: E1125 10:11:29.737697 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:11:41 crc kubenswrapper[4687]: I1125 10:11:41.736731 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:11:41 crc kubenswrapper[4687]: E1125 10:11:41.737711 4687 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:11:54 crc kubenswrapper[4687]: I1125 10:11:54.735445 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:11:54 crc kubenswrapper[4687]: E1125 10:11:54.736143 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:12:05 crc kubenswrapper[4687]: I1125 10:12:05.750653 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:12:05 crc kubenswrapper[4687]: E1125 10:12:05.751682 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:12:14 crc kubenswrapper[4687]: I1125 10:12:14.760569 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pc7f9/must-gather-ltxcn"] Nov 25 10:12:14 crc kubenswrapper[4687]: E1125 10:12:14.761622 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc9d9172-75a3-4c5b-b0ec-255bc67b4161" containerName="copy" Nov 25 10:12:14 crc kubenswrapper[4687]: I1125 10:12:14.761641 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc9d9172-75a3-4c5b-b0ec-255bc67b4161" containerName="copy" Nov 25 10:12:14 crc kubenswrapper[4687]: E1125 10:12:14.761669 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2c124f3-471d-4612-ac7b-2fb52eace90d" containerName="container-00" Nov 25 10:12:14 crc kubenswrapper[4687]: I1125 10:12:14.761679 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2c124f3-471d-4612-ac7b-2fb52eace90d" containerName="container-00" Nov 25 10:12:14 crc kubenswrapper[4687]: E1125 10:12:14.761714 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc9d9172-75a3-4c5b-b0ec-255bc67b4161" containerName="gather" Nov 25 10:12:14 crc kubenswrapper[4687]: I1125 10:12:14.761722 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc9d9172-75a3-4c5b-b0ec-255bc67b4161" containerName="gather" Nov 25 10:12:14 crc kubenswrapper[4687]: I1125 10:12:14.761949 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc9d9172-75a3-4c5b-b0ec-255bc67b4161" containerName="gather" Nov 25 10:12:14 crc kubenswrapper[4687]: I1125 10:12:14.761974 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc9d9172-75a3-4c5b-b0ec-255bc67b4161" containerName="copy" Nov 25 10:12:14 crc kubenswrapper[4687]: I1125 10:12:14.761993 4687 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="a2c124f3-471d-4612-ac7b-2fb52eace90d" containerName="container-00" Nov 25 10:12:14 crc kubenswrapper[4687]: I1125 10:12:14.763197 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pc7f9/must-gather-ltxcn" Nov 25 10:12:14 crc kubenswrapper[4687]: I1125 10:12:14.769289 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-pc7f9"/"openshift-service-ca.crt" Nov 25 10:12:14 crc kubenswrapper[4687]: I1125 10:12:14.769456 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-pc7f9"/"kube-root-ca.crt" Nov 25 10:12:14 crc kubenswrapper[4687]: I1125 10:12:14.772933 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-pc7f9"/"default-dockercfg-tjnxk" Nov 25 10:12:14 crc kubenswrapper[4687]: I1125 10:12:14.772933 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-pc7f9/must-gather-ltxcn"] Nov 25 10:12:14 crc kubenswrapper[4687]: I1125 10:12:14.807238 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b5895325-0d62-4355-99f5-edf281f9a5c6-must-gather-output\") pod \"must-gather-ltxcn\" (UID: \"b5895325-0d62-4355-99f5-edf281f9a5c6\") " pod="openshift-must-gather-pc7f9/must-gather-ltxcn" Nov 25 10:12:14 crc kubenswrapper[4687]: I1125 10:12:14.807672 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jlvd\" (UniqueName: \"kubernetes.io/projected/b5895325-0d62-4355-99f5-edf281f9a5c6-kube-api-access-8jlvd\") pod \"must-gather-ltxcn\" (UID: \"b5895325-0d62-4355-99f5-edf281f9a5c6\") " pod="openshift-must-gather-pc7f9/must-gather-ltxcn" Nov 25 10:12:14 crc kubenswrapper[4687]: I1125 10:12:14.909237 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b5895325-0d62-4355-99f5-edf281f9a5c6-must-gather-output\") pod \"must-gather-ltxcn\" (UID: \"b5895325-0d62-4355-99f5-edf281f9a5c6\") " pod="openshift-must-gather-pc7f9/must-gather-ltxcn" Nov 25 10:12:14 crc kubenswrapper[4687]: I1125 10:12:14.909304 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jlvd\" (UniqueName: \"kubernetes.io/projected/b5895325-0d62-4355-99f5-edf281f9a5c6-kube-api-access-8jlvd\") pod \"must-gather-ltxcn\" (UID: \"b5895325-0d62-4355-99f5-edf281f9a5c6\") " pod="openshift-must-gather-pc7f9/must-gather-ltxcn" Nov 25 10:12:14 crc kubenswrapper[4687]: I1125 10:12:14.910008 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b5895325-0d62-4355-99f5-edf281f9a5c6-must-gather-output\") pod \"must-gather-ltxcn\" (UID: \"b5895325-0d62-4355-99f5-edf281f9a5c6\") " pod="openshift-must-gather-pc7f9/must-gather-ltxcn" Nov 25 10:12:14 crc kubenswrapper[4687]: I1125 10:12:14.945551 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jlvd\" (UniqueName: \"kubernetes.io/projected/b5895325-0d62-4355-99f5-edf281f9a5c6-kube-api-access-8jlvd\") pod \"must-gather-ltxcn\" (UID: \"b5895325-0d62-4355-99f5-edf281f9a5c6\") " pod="openshift-must-gather-pc7f9/must-gather-ltxcn" Nov 25 10:12:15 crc kubenswrapper[4687]: I1125 10:12:15.090446 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pc7f9/must-gather-ltxcn" Nov 25 10:12:15 crc kubenswrapper[4687]: I1125 10:12:15.585944 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-pc7f9/must-gather-ltxcn"] Nov 25 10:12:15 crc kubenswrapper[4687]: I1125 10:12:15.912830 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pc7f9/must-gather-ltxcn" event={"ID":"b5895325-0d62-4355-99f5-edf281f9a5c6","Type":"ContainerStarted","Data":"7303c7f1fde67059896078cea84ba9bbc4816856070991b935d70a4b7eb96c00"} Nov 25 10:12:15 crc kubenswrapper[4687]: I1125 10:12:15.913194 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pc7f9/must-gather-ltxcn" event={"ID":"b5895325-0d62-4355-99f5-edf281f9a5c6","Type":"ContainerStarted","Data":"feead04379b8a8d2d53f2311eb6f0e94b63f530a9fe9fe8297139d369d22fc64"} Nov 25 10:12:16 crc kubenswrapper[4687]: I1125 10:12:16.923313 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pc7f9/must-gather-ltxcn" event={"ID":"b5895325-0d62-4355-99f5-edf281f9a5c6","Type":"ContainerStarted","Data":"de4fcc041542c6acb11dfa831b2bd2a8ea91e3f8abcfd3098fdfdb0b5f094fbc"} Nov 25 10:12:16 crc kubenswrapper[4687]: I1125 10:12:16.948091 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-pc7f9/must-gather-ltxcn" podStartSLOduration=2.948062047 podStartE2EDuration="2.948062047s" podCreationTimestamp="2025-11-25 10:12:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:12:16.938676743 +0000 UTC m=+4131.992316461" watchObservedRunningTime="2025-11-25 10:12:16.948062047 +0000 UTC m=+4132.001701775" Nov 25 10:12:17 crc kubenswrapper[4687]: I1125 10:12:17.760608 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:12:17 crc kubenswrapper[4687]: E1125 10:12:17.761190 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:12:19 crc kubenswrapper[4687]: I1125 10:12:19.839039 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pc7f9/crc-debug-2d2mz"] Nov 25 10:12:19 crc kubenswrapper[4687]: I1125 10:12:19.840931 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pc7f9/crc-debug-2d2mz" Nov 25 10:12:20 crc kubenswrapper[4687]: I1125 10:12:20.010206 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t57mm\" (UniqueName: \"kubernetes.io/projected/eeb46128-89b6-4a49-94a1-6bf513a5e80b-kube-api-access-t57mm\") pod \"crc-debug-2d2mz\" (UID: \"eeb46128-89b6-4a49-94a1-6bf513a5e80b\") " pod="openshift-must-gather-pc7f9/crc-debug-2d2mz" Nov 25 10:12:20 crc kubenswrapper[4687]: I1125 10:12:20.010407 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/eeb46128-89b6-4a49-94a1-6bf513a5e80b-host\") pod \"crc-debug-2d2mz\" (UID: \"eeb46128-89b6-4a49-94a1-6bf513a5e80b\") " pod="openshift-must-gather-pc7f9/crc-debug-2d2mz" Nov 25 10:12:20 crc kubenswrapper[4687]: I1125 10:12:20.112542 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/eeb46128-89b6-4a49-94a1-6bf513a5e80b-host\") pod \"crc-debug-2d2mz\" (UID: \"eeb46128-89b6-4a49-94a1-6bf513a5e80b\") " pod="openshift-must-gather-pc7f9/crc-debug-2d2mz" Nov 25 10:12:20 crc kubenswrapper[4687]: I1125 10:12:20.112641 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t57mm\" (UniqueName: \"kubernetes.io/projected/eeb46128-89b6-4a49-94a1-6bf513a5e80b-kube-api-access-t57mm\") pod \"crc-debug-2d2mz\" (UID: \"eeb46128-89b6-4a49-94a1-6bf513a5e80b\") " pod="openshift-must-gather-pc7f9/crc-debug-2d2mz" Nov 25 10:12:20 crc kubenswrapper[4687]: I1125 10:12:20.113111 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/eeb46128-89b6-4a49-94a1-6bf513a5e80b-host\") pod \"crc-debug-2d2mz\" (UID: \"eeb46128-89b6-4a49-94a1-6bf513a5e80b\") " pod="openshift-must-gather-pc7f9/crc-debug-2d2mz" Nov 25 10:12:20 crc kubenswrapper[4687]: I1125 10:12:20.148425 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t57mm\" (UniqueName: \"kubernetes.io/projected/eeb46128-89b6-4a49-94a1-6bf513a5e80b-kube-api-access-t57mm\") pod \"crc-debug-2d2mz\" (UID: \"eeb46128-89b6-4a49-94a1-6bf513a5e80b\") " pod="openshift-must-gather-pc7f9/crc-debug-2d2mz" Nov 25 10:12:20 crc kubenswrapper[4687]: I1125 10:12:20.173476 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pc7f9/crc-debug-2d2mz" Nov 25 10:12:20 crc kubenswrapper[4687]: I1125 10:12:20.980924 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pc7f9/crc-debug-2d2mz" event={"ID":"eeb46128-89b6-4a49-94a1-6bf513a5e80b","Type":"ContainerStarted","Data":"6e410b68cf05abcafac96f0e30a1e71d4170343f5b8bd5acd671cacf35176bfe"} Nov 25 10:12:20 crc kubenswrapper[4687]: I1125 10:12:20.981463 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pc7f9/crc-debug-2d2mz" event={"ID":"eeb46128-89b6-4a49-94a1-6bf513a5e80b","Type":"ContainerStarted","Data":"d39eb0b7d5e144e49b8ec8124e84706953762b843dea71574b7a61d69681d429"} Nov 25 10:12:20 crc kubenswrapper[4687]: I1125 10:12:20.993066 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-pc7f9/crc-debug-2d2mz" podStartSLOduration=1.993041729 podStartE2EDuration="1.993041729s" podCreationTimestamp="2025-11-25 10:12:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:12:20.992518165 +0000 UTC m=+4136.046157883" watchObservedRunningTime="2025-11-25 10:12:20.993041729 +0000 UTC m=+4136.046681447" Nov 25 10:12:29 crc kubenswrapper[4687]: I1125 10:12:29.734964 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:12:30 crc kubenswrapper[4687]: I1125 10:12:30.079171 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerStarted","Data":"f6be9ff931585212d9effca07fbb25c8a5e8790fec54c8590b61dc256c888a77"} Nov 25 10:12:42 crc kubenswrapper[4687]: I1125 10:12:42.129151 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-z2lqx"] Nov 25 10:12:42 crc kubenswrapper[4687]: I1125 10:12:42.131513 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z2lqx" Nov 25 10:12:42 crc kubenswrapper[4687]: I1125 10:12:42.145844 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z2lqx"] Nov 25 10:12:42 crc kubenswrapper[4687]: I1125 10:12:42.247398 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/604532e7-b921-4731-abe4-47f29f720b99-utilities\") pod \"redhat-marketplace-z2lqx\" (UID: \"604532e7-b921-4731-abe4-47f29f720b99\") " pod="openshift-marketplace/redhat-marketplace-z2lqx" Nov 25 10:12:42 crc kubenswrapper[4687]: I1125 10:12:42.247445 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hvdt\" (UniqueName: \"kubernetes.io/projected/604532e7-b921-4731-abe4-47f29f720b99-kube-api-access-6hvdt\") pod \"redhat-marketplace-z2lqx\" (UID: \"604532e7-b921-4731-abe4-47f29f720b99\") " pod="openshift-marketplace/redhat-marketplace-z2lqx" Nov 25 10:12:42 crc kubenswrapper[4687]: I1125 10:12:42.247476 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/604532e7-b921-4731-abe4-47f29f720b99-catalog-content\") pod \"redhat-marketplace-z2lqx\" (UID: \"604532e7-b921-4731-abe4-47f29f720b99\") " pod="openshift-marketplace/redhat-marketplace-z2lqx" Nov 25 10:12:42 crc kubenswrapper[4687]: I1125 10:12:42.349526 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/604532e7-b921-4731-abe4-47f29f720b99-utilities\") pod \"redhat-marketplace-z2lqx\" (UID: \"604532e7-b921-4731-abe4-47f29f720b99\") " pod="openshift-marketplace/redhat-marketplace-z2lqx" Nov 25 10:12:42 crc kubenswrapper[4687]: I1125 10:12:42.349594 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hvdt\" (UniqueName: \"kubernetes.io/projected/604532e7-b921-4731-abe4-47f29f720b99-kube-api-access-6hvdt\") pod \"redhat-marketplace-z2lqx\" (UID: \"604532e7-b921-4731-abe4-47f29f720b99\") " pod="openshift-marketplace/redhat-marketplace-z2lqx" Nov 25 10:12:42 crc kubenswrapper[4687]: I1125 10:12:42.349636 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/604532e7-b921-4731-abe4-47f29f720b99-catalog-content\") pod \"redhat-marketplace-z2lqx\" (UID: \"604532e7-b921-4731-abe4-47f29f720b99\") " pod="openshift-marketplace/redhat-marketplace-z2lqx" Nov 25 10:12:42 crc kubenswrapper[4687]: I1125 10:12:42.350308 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/604532e7-b921-4731-abe4-47f29f720b99-catalog-content\") pod \"redhat-marketplace-z2lqx\" (UID: \"604532e7-b921-4731-abe4-47f29f720b99\") " pod="openshift-marketplace/redhat-marketplace-z2lqx" Nov 25 10:12:42 crc kubenswrapper[4687]: I1125 10:12:42.350629 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/604532e7-b921-4731-abe4-47f29f720b99-utilities\") pod \"redhat-marketplace-z2lqx\" (UID: \"604532e7-b921-4731-abe4-47f29f720b99\") " pod="openshift-marketplace/redhat-marketplace-z2lqx" Nov 25 10:12:42 crc kubenswrapper[4687]: I1125 10:12:42.622308 4687 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-6hvdt\" (UniqueName: \"kubernetes.io/projected/604532e7-b921-4731-abe4-47f29f720b99-kube-api-access-6hvdt\") pod \"redhat-marketplace-z2lqx\" (UID: \"604532e7-b921-4731-abe4-47f29f720b99\") " pod="openshift-marketplace/redhat-marketplace-z2lqx" Nov 25 10:12:42 crc kubenswrapper[4687]: I1125 10:12:42.753725 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z2lqx" Nov 25 10:12:43 crc kubenswrapper[4687]: I1125 10:12:43.277988 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z2lqx"] Nov 25 10:12:43 crc kubenswrapper[4687]: W1125 10:12:43.289845 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod604532e7_b921_4731_abe4_47f29f720b99.slice/crio-6a063cbeb0b227ca2d0775f5a69754b66b8af13cc6b2d9f15585215709580f4f WatchSource:0}: Error finding container 6a063cbeb0b227ca2d0775f5a69754b66b8af13cc6b2d9f15585215709580f4f: Status 404 returned error can't find the container with id 6a063cbeb0b227ca2d0775f5a69754b66b8af13cc6b2d9f15585215709580f4f Nov 25 10:12:44 crc kubenswrapper[4687]: I1125 10:12:44.203366 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z2lqx" event={"ID":"604532e7-b921-4731-abe4-47f29f720b99","Type":"ContainerStarted","Data":"6a063cbeb0b227ca2d0775f5a69754b66b8af13cc6b2d9f15585215709580f4f"} Nov 25 10:12:47 crc kubenswrapper[4687]: I1125 10:12:47.263142 4687 generic.go:334] "Generic (PLEG): container finished" podID="604532e7-b921-4731-abe4-47f29f720b99" containerID="6e8648d985456f47fa7b727b1b8b4e8032dda2d417fa7c6d8e27e8a92f51562f" exitCode=0 Nov 25 10:12:47 crc kubenswrapper[4687]: I1125 10:12:47.263662 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z2lqx" event={"ID":"604532e7-b921-4731-abe4-47f29f720b99","Type":"ContainerDied","Data":"6e8648d985456f47fa7b727b1b8b4e8032dda2d417fa7c6d8e27e8a92f51562f"} Nov 25 10:12:47 crc kubenswrapper[4687]: I1125 10:12:47.265877 4687 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:12:54 crc kubenswrapper[4687]: I1125 10:12:54.335260 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z2lqx" event={"ID":"604532e7-b921-4731-abe4-47f29f720b99","Type":"ContainerStarted","Data":"3226015ab564d720c5b7bdfcdce9d1d0c2ce1813e0645d1b8611a8288a7b0046"} Nov 25 10:12:55 crc kubenswrapper[4687]: I1125 10:12:55.345518 4687 generic.go:334] "Generic (PLEG): container finished" podID="604532e7-b921-4731-abe4-47f29f720b99" containerID="3226015ab564d720c5b7bdfcdce9d1d0c2ce1813e0645d1b8611a8288a7b0046" exitCode=0 Nov 25 10:12:55 crc kubenswrapper[4687]: I1125 10:12:55.345611 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z2lqx" event={"ID":"604532e7-b921-4731-abe4-47f29f720b99","Type":"ContainerDied","Data":"3226015ab564d720c5b7bdfcdce9d1d0c2ce1813e0645d1b8611a8288a7b0046"} Nov 25 10:12:55 crc kubenswrapper[4687]: I1125 10:12:55.347754 4687 generic.go:334] "Generic (PLEG): container finished" podID="eeb46128-89b6-4a49-94a1-6bf513a5e80b" containerID="6e410b68cf05abcafac96f0e30a1e71d4170343f5b8bd5acd671cacf35176bfe" exitCode=0 Nov 25 10:12:55 crc kubenswrapper[4687]: I1125 10:12:55.347785 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-must-gather-pc7f9/crc-debug-2d2mz" event={"ID":"eeb46128-89b6-4a49-94a1-6bf513a5e80b","Type":"ContainerDied","Data":"6e410b68cf05abcafac96f0e30a1e71d4170343f5b8bd5acd671cacf35176bfe"} Nov 25 10:12:56 crc kubenswrapper[4687]: I1125 10:12:56.460532 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pc7f9/crc-debug-2d2mz" Nov 25 10:12:56 crc kubenswrapper[4687]: I1125 10:12:56.495540 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pc7f9/crc-debug-2d2mz"] Nov 25 10:12:56 crc kubenswrapper[4687]: I1125 10:12:56.507780 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pc7f9/crc-debug-2d2mz"] Nov 25 10:12:56 crc kubenswrapper[4687]: I1125 10:12:56.622070 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t57mm\" (UniqueName: \"kubernetes.io/projected/eeb46128-89b6-4a49-94a1-6bf513a5e80b-kube-api-access-t57mm\") pod \"eeb46128-89b6-4a49-94a1-6bf513a5e80b\" (UID: \"eeb46128-89b6-4a49-94a1-6bf513a5e80b\") " Nov 25 10:12:56 crc kubenswrapper[4687]: I1125 10:12:56.622433 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/eeb46128-89b6-4a49-94a1-6bf513a5e80b-host\") pod \"eeb46128-89b6-4a49-94a1-6bf513a5e80b\" (UID: \"eeb46128-89b6-4a49-94a1-6bf513a5e80b\") " Nov 25 10:12:56 crc kubenswrapper[4687]: I1125 10:12:56.622649 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eeb46128-89b6-4a49-94a1-6bf513a5e80b-host" (OuterVolumeSpecName: "host") pod "eeb46128-89b6-4a49-94a1-6bf513a5e80b" (UID: "eeb46128-89b6-4a49-94a1-6bf513a5e80b"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:12:56 crc kubenswrapper[4687]: I1125 10:12:56.622924 4687 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/eeb46128-89b6-4a49-94a1-6bf513a5e80b-host\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:56 crc kubenswrapper[4687]: I1125 10:12:56.630738 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eeb46128-89b6-4a49-94a1-6bf513a5e80b-kube-api-access-t57mm" (OuterVolumeSpecName: "kube-api-access-t57mm") pod "eeb46128-89b6-4a49-94a1-6bf513a5e80b" (UID: "eeb46128-89b6-4a49-94a1-6bf513a5e80b"). InnerVolumeSpecName "kube-api-access-t57mm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:12:56 crc kubenswrapper[4687]: I1125 10:12:56.726548 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t57mm\" (UniqueName: \"kubernetes.io/projected/eeb46128-89b6-4a49-94a1-6bf513a5e80b-kube-api-access-t57mm\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:57 crc kubenswrapper[4687]: I1125 10:12:57.365068 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pc7f9/crc-debug-2d2mz" Nov 25 10:12:57 crc kubenswrapper[4687]: I1125 10:12:57.365241 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d39eb0b7d5e144e49b8ec8124e84706953762b843dea71574b7a61d69681d429" Nov 25 10:12:57 crc kubenswrapper[4687]: I1125 10:12:57.367894 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z2lqx" event={"ID":"604532e7-b921-4731-abe4-47f29f720b99","Type":"ContainerStarted","Data":"57156f9239a9b549daed506c84744d2007b24988c8c524e4df700eece2acdbbe"} Nov 25 10:12:57 crc kubenswrapper[4687]: I1125 10:12:57.746915 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eeb46128-89b6-4a49-94a1-6bf513a5e80b" path="/var/lib/kubelet/pods/eeb46128-89b6-4a49-94a1-6bf513a5e80b/volumes" Nov 25 10:12:57 crc kubenswrapper[4687]: I1125 10:12:57.747530 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pc7f9/crc-debug-zfxrb"] Nov 25 10:12:57 crc kubenswrapper[4687]: E1125 10:12:57.747841 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eeb46128-89b6-4a49-94a1-6bf513a5e80b" containerName="container-00" Nov 25 10:12:57 crc kubenswrapper[4687]: I1125 10:12:57.747857 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="eeb46128-89b6-4a49-94a1-6bf513a5e80b" containerName="container-00" Nov 25 10:12:57 crc kubenswrapper[4687]: I1125 10:12:57.749811 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="eeb46128-89b6-4a49-94a1-6bf513a5e80b" containerName="container-00" Nov 25 10:12:57 crc kubenswrapper[4687]: I1125 10:12:57.750696 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pc7f9/crc-debug-zfxrb" Nov 25 10:12:57 crc kubenswrapper[4687]: I1125 10:12:57.946673 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/85b4a24d-8899-4aec-9d0a-62578bb0b56b-host\") pod \"crc-debug-zfxrb\" (UID: \"85b4a24d-8899-4aec-9d0a-62578bb0b56b\") " pod="openshift-must-gather-pc7f9/crc-debug-zfxrb" Nov 25 10:12:57 crc kubenswrapper[4687]: I1125 10:12:57.946729 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6lb9\" (UniqueName: \"kubernetes.io/projected/85b4a24d-8899-4aec-9d0a-62578bb0b56b-kube-api-access-j6lb9\") pod \"crc-debug-zfxrb\" (UID: \"85b4a24d-8899-4aec-9d0a-62578bb0b56b\") " pod="openshift-must-gather-pc7f9/crc-debug-zfxrb" Nov 25 10:12:58 crc kubenswrapper[4687]: I1125 10:12:58.048989 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/85b4a24d-8899-4aec-9d0a-62578bb0b56b-host\") pod \"crc-debug-zfxrb\" (UID: \"85b4a24d-8899-4aec-9d0a-62578bb0b56b\") " pod="openshift-must-gather-pc7f9/crc-debug-zfxrb" Nov 25 10:12:58 crc kubenswrapper[4687]: I1125 10:12:58.049452 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6lb9\" (UniqueName: \"kubernetes.io/projected/85b4a24d-8899-4aec-9d0a-62578bb0b56b-kube-api-access-j6lb9\") pod \"crc-debug-zfxrb\" (UID: \"85b4a24d-8899-4aec-9d0a-62578bb0b56b\") " pod="openshift-must-gather-pc7f9/crc-debug-zfxrb" Nov 25 10:12:58 crc kubenswrapper[4687]: I1125 10:12:58.049122 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: 
\"kubernetes.io/host-path/85b4a24d-8899-4aec-9d0a-62578bb0b56b-host\") pod \"crc-debug-zfxrb\" (UID: \"85b4a24d-8899-4aec-9d0a-62578bb0b56b\") " pod="openshift-must-gather-pc7f9/crc-debug-zfxrb" Nov 25 10:12:58 crc kubenswrapper[4687]: I1125 10:12:58.070199 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6lb9\" (UniqueName: \"kubernetes.io/projected/85b4a24d-8899-4aec-9d0a-62578bb0b56b-kube-api-access-j6lb9\") pod \"crc-debug-zfxrb\" (UID: \"85b4a24d-8899-4aec-9d0a-62578bb0b56b\") " pod="openshift-must-gather-pc7f9/crc-debug-zfxrb" Nov 25 10:12:58 crc kubenswrapper[4687]: I1125 10:12:58.370559 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pc7f9/crc-debug-zfxrb" Nov 25 10:12:58 crc kubenswrapper[4687]: W1125 10:12:58.409911 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod85b4a24d_8899_4aec_9d0a_62578bb0b56b.slice/crio-624637dc71260cb7357b689a07261dfb6958d65b1cc27e95dae60c73fdb51575 WatchSource:0}: Error finding container 624637dc71260cb7357b689a07261dfb6958d65b1cc27e95dae60c73fdb51575: Status 404 returned error can't find the container with id 624637dc71260cb7357b689a07261dfb6958d65b1cc27e95dae60c73fdb51575 Nov 25 10:12:58 crc kubenswrapper[4687]: I1125 10:12:58.417364 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-z2lqx" podStartSLOduration=6.629382432 podStartE2EDuration="16.417342681s" podCreationTimestamp="2025-11-25 10:12:42 +0000 UTC" firstStartedPulling="2025-11-25 10:12:47.265618769 +0000 UTC m=+4162.319258487" lastFinishedPulling="2025-11-25 10:12:57.053579018 +0000 UTC m=+4172.107218736" observedRunningTime="2025-11-25 10:12:58.402872908 +0000 UTC m=+4173.456512626" watchObservedRunningTime="2025-11-25 10:12:58.417342681 +0000 UTC m=+4173.470982399" Nov 25 10:12:59 crc kubenswrapper[4687]: I1125 10:12:59.385825 4687 generic.go:334] "Generic (PLEG): container finished" podID="85b4a24d-8899-4aec-9d0a-62578bb0b56b" containerID="af6e9841c09afc2604eb05243457e3779124144efc506467599804a48965414d" exitCode=0 Nov 25 10:12:59 crc kubenswrapper[4687]: I1125 10:12:59.385875 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pc7f9/crc-debug-zfxrb" event={"ID":"85b4a24d-8899-4aec-9d0a-62578bb0b56b","Type":"ContainerDied","Data":"af6e9841c09afc2604eb05243457e3779124144efc506467599804a48965414d"} Nov 25 10:12:59 crc kubenswrapper[4687]: I1125 10:12:59.386136 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pc7f9/crc-debug-zfxrb" event={"ID":"85b4a24d-8899-4aec-9d0a-62578bb0b56b","Type":"ContainerStarted","Data":"624637dc71260cb7357b689a07261dfb6958d65b1cc27e95dae60c73fdb51575"} Nov 25 10:12:59 crc kubenswrapper[4687]: I1125 10:12:59.759120 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pc7f9/crc-debug-zfxrb"] Nov 25 10:12:59 crc kubenswrapper[4687]: I1125 10:12:59.804362 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pc7f9/crc-debug-zfxrb"] Nov 25 10:13:00 crc kubenswrapper[4687]: I1125 10:13:00.503772 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pc7f9/crc-debug-zfxrb" Nov 25 10:13:00 crc kubenswrapper[4687]: I1125 10:13:00.603521 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j6lb9\" (UniqueName: \"kubernetes.io/projected/85b4a24d-8899-4aec-9d0a-62578bb0b56b-kube-api-access-j6lb9\") pod \"85b4a24d-8899-4aec-9d0a-62578bb0b56b\" (UID: \"85b4a24d-8899-4aec-9d0a-62578bb0b56b\") " Nov 25 10:13:00 crc kubenswrapper[4687]: I1125 10:13:00.603717 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/85b4a24d-8899-4aec-9d0a-62578bb0b56b-host\") pod \"85b4a24d-8899-4aec-9d0a-62578bb0b56b\" (UID: \"85b4a24d-8899-4aec-9d0a-62578bb0b56b\") " Nov 25 10:13:00 crc kubenswrapper[4687]: I1125 10:13:00.604090 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/85b4a24d-8899-4aec-9d0a-62578bb0b56b-host" (OuterVolumeSpecName: "host") pod "85b4a24d-8899-4aec-9d0a-62578bb0b56b" (UID: "85b4a24d-8899-4aec-9d0a-62578bb0b56b"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:13:00 crc kubenswrapper[4687]: I1125 10:13:00.604454 4687 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/85b4a24d-8899-4aec-9d0a-62578bb0b56b-host\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:00 crc kubenswrapper[4687]: I1125 10:13:00.609882 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85b4a24d-8899-4aec-9d0a-62578bb0b56b-kube-api-access-j6lb9" (OuterVolumeSpecName: "kube-api-access-j6lb9") pod "85b4a24d-8899-4aec-9d0a-62578bb0b56b" (UID: "85b4a24d-8899-4aec-9d0a-62578bb0b56b"). InnerVolumeSpecName "kube-api-access-j6lb9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:13:00 crc kubenswrapper[4687]: I1125 10:13:00.707437 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j6lb9\" (UniqueName: \"kubernetes.io/projected/85b4a24d-8899-4aec-9d0a-62578bb0b56b-kube-api-access-j6lb9\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:00 crc kubenswrapper[4687]: I1125 10:13:00.988997 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pc7f9/crc-debug-glq89"] Nov 25 10:13:00 crc kubenswrapper[4687]: E1125 10:13:00.989479 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85b4a24d-8899-4aec-9d0a-62578bb0b56b" containerName="container-00" Nov 25 10:13:00 crc kubenswrapper[4687]: I1125 10:13:00.989524 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="85b4a24d-8899-4aec-9d0a-62578bb0b56b" containerName="container-00" Nov 25 10:13:00 crc kubenswrapper[4687]: I1125 10:13:00.989780 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="85b4a24d-8899-4aec-9d0a-62578bb0b56b" containerName="container-00" Nov 25 10:13:00 crc kubenswrapper[4687]: I1125 10:13:00.990429 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pc7f9/crc-debug-glq89" Nov 25 10:13:01 crc kubenswrapper[4687]: I1125 10:13:01.013849 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfjdb\" (UniqueName: \"kubernetes.io/projected/92b97828-9989-4f8a-af1e-8ab5885892c2-kube-api-access-cfjdb\") pod \"crc-debug-glq89\" (UID: \"92b97828-9989-4f8a-af1e-8ab5885892c2\") " pod="openshift-must-gather-pc7f9/crc-debug-glq89" Nov 25 10:13:01 crc kubenswrapper[4687]: I1125 10:13:01.013925 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/92b97828-9989-4f8a-af1e-8ab5885892c2-host\") pod \"crc-debug-glq89\" (UID: \"92b97828-9989-4f8a-af1e-8ab5885892c2\") " pod="openshift-must-gather-pc7f9/crc-debug-glq89" Nov 25 10:13:01 crc kubenswrapper[4687]: I1125 10:13:01.114791 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfjdb\" (UniqueName: \"kubernetes.io/projected/92b97828-9989-4f8a-af1e-8ab5885892c2-kube-api-access-cfjdb\") pod \"crc-debug-glq89\" (UID: \"92b97828-9989-4f8a-af1e-8ab5885892c2\") " pod="openshift-must-gather-pc7f9/crc-debug-glq89" Nov 25 10:13:01 crc kubenswrapper[4687]: I1125 10:13:01.115223 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/92b97828-9989-4f8a-af1e-8ab5885892c2-host\") pod \"crc-debug-glq89\" (UID: \"92b97828-9989-4f8a-af1e-8ab5885892c2\") " pod="openshift-must-gather-pc7f9/crc-debug-glq89" Nov 25 10:13:01 crc kubenswrapper[4687]: I1125 10:13:01.115313 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/92b97828-9989-4f8a-af1e-8ab5885892c2-host\") pod \"crc-debug-glq89\" (UID: \"92b97828-9989-4f8a-af1e-8ab5885892c2\") " pod="openshift-must-gather-pc7f9/crc-debug-glq89" Nov 25 10:13:01 crc kubenswrapper[4687]: I1125 10:13:01.140816 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfjdb\" (UniqueName: \"kubernetes.io/projected/92b97828-9989-4f8a-af1e-8ab5885892c2-kube-api-access-cfjdb\") pod \"crc-debug-glq89\" (UID: \"92b97828-9989-4f8a-af1e-8ab5885892c2\") " pod="openshift-must-gather-pc7f9/crc-debug-glq89" Nov 25 10:13:01 crc kubenswrapper[4687]: I1125 10:13:01.308956 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pc7f9/crc-debug-glq89" Nov 25 10:13:01 crc kubenswrapper[4687]: W1125 10:13:01.339324 4687 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod92b97828_9989_4f8a_af1e_8ab5885892c2.slice/crio-15d38a0bdbd75ec8d31a9220ce1a12334ebe8db3f2909500ce2eb93e13868e0f WatchSource:0}: Error finding container 15d38a0bdbd75ec8d31a9220ce1a12334ebe8db3f2909500ce2eb93e13868e0f: Status 404 returned error can't find the container with id 15d38a0bdbd75ec8d31a9220ce1a12334ebe8db3f2909500ce2eb93e13868e0f Nov 25 10:13:01 crc kubenswrapper[4687]: I1125 10:13:01.402322 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pc7f9/crc-debug-glq89" event={"ID":"92b97828-9989-4f8a-af1e-8ab5885892c2","Type":"ContainerStarted","Data":"15d38a0bdbd75ec8d31a9220ce1a12334ebe8db3f2909500ce2eb93e13868e0f"} Nov 25 10:13:01 crc kubenswrapper[4687]: I1125 10:13:01.407141 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="624637dc71260cb7357b689a07261dfb6958d65b1cc27e95dae60c73fdb51575" Nov 25 10:13:01 crc kubenswrapper[4687]: I1125 10:13:01.407179 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pc7f9/crc-debug-zfxrb" Nov 25 10:13:01 crc kubenswrapper[4687]: I1125 10:13:01.747605 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85b4a24d-8899-4aec-9d0a-62578bb0b56b" path="/var/lib/kubelet/pods/85b4a24d-8899-4aec-9d0a-62578bb0b56b/volumes" Nov 25 10:13:02 crc kubenswrapper[4687]: I1125 10:13:02.420210 4687 generic.go:334] "Generic (PLEG): container finished" podID="92b97828-9989-4f8a-af1e-8ab5885892c2" containerID="25bc4fb971157fa4c975ab03779594aa1ca826455906f0e8a3d373d9985f2814" exitCode=0 Nov 25 10:13:02 crc kubenswrapper[4687]: I1125 10:13:02.420320 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pc7f9/crc-debug-glq89" event={"ID":"92b97828-9989-4f8a-af1e-8ab5885892c2","Type":"ContainerDied","Data":"25bc4fb971157fa4c975ab03779594aa1ca826455906f0e8a3d373d9985f2814"} Nov 25 10:13:02 crc kubenswrapper[4687]: I1125 10:13:02.463575 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pc7f9/crc-debug-glq89"] Nov 25 10:13:02 crc kubenswrapper[4687]: I1125 10:13:02.471478 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pc7f9/crc-debug-glq89"] Nov 25 10:13:02 crc kubenswrapper[4687]: I1125 10:13:02.759715 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-z2lqx" Nov 25 10:13:02 crc kubenswrapper[4687]: I1125 10:13:02.759791 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-z2lqx" Nov 25 10:13:02 crc kubenswrapper[4687]: I1125 10:13:02.861753 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-z2lqx" Nov 25 10:13:03 crc kubenswrapper[4687]: I1125 10:13:03.806928 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-z2lqx" Nov 25 10:13:03 crc kubenswrapper[4687]: I1125 10:13:03.829087 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pc7f9/crc-debug-glq89" Nov 25 10:13:03 crc kubenswrapper[4687]: I1125 10:13:03.855338 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z2lqx"] Nov 25 10:13:03 crc kubenswrapper[4687]: I1125 10:13:03.885680 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfjdb\" (UniqueName: \"kubernetes.io/projected/92b97828-9989-4f8a-af1e-8ab5885892c2-kube-api-access-cfjdb\") pod \"92b97828-9989-4f8a-af1e-8ab5885892c2\" (UID: \"92b97828-9989-4f8a-af1e-8ab5885892c2\") " Nov 25 10:13:03 crc kubenswrapper[4687]: I1125 10:13:03.885759 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/92b97828-9989-4f8a-af1e-8ab5885892c2-host\") pod \"92b97828-9989-4f8a-af1e-8ab5885892c2\" (UID: \"92b97828-9989-4f8a-af1e-8ab5885892c2\") " Nov 25 10:13:03 crc kubenswrapper[4687]: I1125 10:13:03.885935 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/92b97828-9989-4f8a-af1e-8ab5885892c2-host" (OuterVolumeSpecName: "host") pod "92b97828-9989-4f8a-af1e-8ab5885892c2" (UID: "92b97828-9989-4f8a-af1e-8ab5885892c2"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:13:03 crc kubenswrapper[4687]: I1125 10:13:03.886252 4687 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/92b97828-9989-4f8a-af1e-8ab5885892c2-host\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:03 crc kubenswrapper[4687]: I1125 10:13:03.891127 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92b97828-9989-4f8a-af1e-8ab5885892c2-kube-api-access-cfjdb" (OuterVolumeSpecName: "kube-api-access-cfjdb") pod "92b97828-9989-4f8a-af1e-8ab5885892c2" (UID: "92b97828-9989-4f8a-af1e-8ab5885892c2"). InnerVolumeSpecName "kube-api-access-cfjdb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:13:03 crc kubenswrapper[4687]: I1125 10:13:03.988159 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfjdb\" (UniqueName: \"kubernetes.io/projected/92b97828-9989-4f8a-af1e-8ab5885892c2-kube-api-access-cfjdb\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:04 crc kubenswrapper[4687]: I1125 10:13:04.445738 4687 scope.go:117] "RemoveContainer" containerID="25bc4fb971157fa4c975ab03779594aa1ca826455906f0e8a3d373d9985f2814" Nov 25 10:13:04 crc kubenswrapper[4687]: I1125 10:13:04.445751 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pc7f9/crc-debug-glq89" Nov 25 10:13:05 crc kubenswrapper[4687]: I1125 10:13:05.452149 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-z2lqx" podUID="604532e7-b921-4731-abe4-47f29f720b99" containerName="registry-server" containerID="cri-o://57156f9239a9b549daed506c84744d2007b24988c8c524e4df700eece2acdbbe" gracePeriod=2 Nov 25 10:13:05 crc kubenswrapper[4687]: I1125 10:13:05.765679 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92b97828-9989-4f8a-af1e-8ab5885892c2" path="/var/lib/kubelet/pods/92b97828-9989-4f8a-af1e-8ab5885892c2/volumes" Nov 25 10:13:06 crc kubenswrapper[4687]: I1125 10:13:06.468854 4687 generic.go:334] "Generic (PLEG): container finished" podID="604532e7-b921-4731-abe4-47f29f720b99" containerID="57156f9239a9b549daed506c84744d2007b24988c8c524e4df700eece2acdbbe" exitCode=0 Nov 25 10:13:06 crc kubenswrapper[4687]: I1125 10:13:06.468953 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z2lqx" event={"ID":"604532e7-b921-4731-abe4-47f29f720b99","Type":"ContainerDied","Data":"57156f9239a9b549daed506c84744d2007b24988c8c524e4df700eece2acdbbe"} Nov 25 10:13:06 crc kubenswrapper[4687]: I1125 10:13:06.959317 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z2lqx" Nov 25 10:13:07 crc kubenswrapper[4687]: I1125 10:13:07.043513 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hvdt\" (UniqueName: \"kubernetes.io/projected/604532e7-b921-4731-abe4-47f29f720b99-kube-api-access-6hvdt\") pod \"604532e7-b921-4731-abe4-47f29f720b99\" (UID: \"604532e7-b921-4731-abe4-47f29f720b99\") " Nov 25 10:13:07 crc kubenswrapper[4687]: I1125 10:13:07.043580 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/604532e7-b921-4731-abe4-47f29f720b99-utilities\") pod \"604532e7-b921-4731-abe4-47f29f720b99\" (UID: \"604532e7-b921-4731-abe4-47f29f720b99\") " Nov 25 10:13:07 crc kubenswrapper[4687]: I1125 10:13:07.043617 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/604532e7-b921-4731-abe4-47f29f720b99-catalog-content\") pod \"604532e7-b921-4731-abe4-47f29f720b99\" (UID: \"604532e7-b921-4731-abe4-47f29f720b99\") " Nov 25 10:13:07 crc kubenswrapper[4687]: I1125 10:13:07.044633 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/604532e7-b921-4731-abe4-47f29f720b99-utilities" (OuterVolumeSpecName: "utilities") pod "604532e7-b921-4731-abe4-47f29f720b99" (UID: "604532e7-b921-4731-abe4-47f29f720b99"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:13:07 crc kubenswrapper[4687]: I1125 10:13:07.049345 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/604532e7-b921-4731-abe4-47f29f720b99-kube-api-access-6hvdt" (OuterVolumeSpecName: "kube-api-access-6hvdt") pod "604532e7-b921-4731-abe4-47f29f720b99" (UID: "604532e7-b921-4731-abe4-47f29f720b99"). InnerVolumeSpecName "kube-api-access-6hvdt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:13:07 crc kubenswrapper[4687]: I1125 10:13:07.062284 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/604532e7-b921-4731-abe4-47f29f720b99-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "604532e7-b921-4731-abe4-47f29f720b99" (UID: "604532e7-b921-4731-abe4-47f29f720b99"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:13:07 crc kubenswrapper[4687]: I1125 10:13:07.145213 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hvdt\" (UniqueName: \"kubernetes.io/projected/604532e7-b921-4731-abe4-47f29f720b99-kube-api-access-6hvdt\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:07 crc kubenswrapper[4687]: I1125 10:13:07.145490 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/604532e7-b921-4731-abe4-47f29f720b99-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:07 crc kubenswrapper[4687]: I1125 10:13:07.145587 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/604532e7-b921-4731-abe4-47f29f720b99-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:07 crc kubenswrapper[4687]: I1125 10:13:07.478214 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z2lqx" event={"ID":"604532e7-b921-4731-abe4-47f29f720b99","Type":"ContainerDied","Data":"6a063cbeb0b227ca2d0775f5a69754b66b8af13cc6b2d9f15585215709580f4f"} Nov 25 10:13:07 crc kubenswrapper[4687]: I1125 10:13:07.478261 4687 scope.go:117] "RemoveContainer" containerID="57156f9239a9b549daed506c84744d2007b24988c8c524e4df700eece2acdbbe" Nov 25 10:13:07 crc kubenswrapper[4687]: I1125 10:13:07.478377 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z2lqx" Nov 25 10:13:07 crc kubenswrapper[4687]: I1125 10:13:07.517305 4687 scope.go:117] "RemoveContainer" containerID="3226015ab564d720c5b7bdfcdce9d1d0c2ce1813e0645d1b8611a8288a7b0046" Nov 25 10:13:07 crc kubenswrapper[4687]: I1125 10:13:07.522280 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z2lqx"] Nov 25 10:13:07 crc kubenswrapper[4687]: I1125 10:13:07.534401 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-z2lqx"] Nov 25 10:13:07 crc kubenswrapper[4687]: I1125 10:13:07.540306 4687 scope.go:117] "RemoveContainer" containerID="6e8648d985456f47fa7b727b1b8b4e8032dda2d417fa7c6d8e27e8a92f51562f" Nov 25 10:13:07 crc kubenswrapper[4687]: I1125 10:13:07.746798 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="604532e7-b921-4731-abe4-47f29f720b99" path="/var/lib/kubelet/pods/604532e7-b921-4731-abe4-47f29f720b99/volumes" Nov 25 10:13:39 crc kubenswrapper[4687]: I1125 10:13:39.135454 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6fd7f79f6b-n9xqk_972adc4c-cd8b-4ead-a7da-1f21cf692157/barbican-api/0.log" Nov 25 10:13:39 crc kubenswrapper[4687]: I1125 10:13:39.202886 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6fd7f79f6b-n9xqk_972adc4c-cd8b-4ead-a7da-1f21cf692157/barbican-api-log/0.log" Nov 25 10:13:39 crc kubenswrapper[4687]: I1125 10:13:39.300481 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-674b9465d-cz7jd_50819727-088c-4d7f-bff7-c95d3d2ece69/barbican-keystone-listener/0.log" Nov 25 10:13:39 crc kubenswrapper[4687]: I1125 10:13:39.371633 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-674b9465d-cz7jd_50819727-088c-4d7f-bff7-c95d3d2ece69/barbican-keystone-listener-log/0.log" Nov 25 10:13:39 crc kubenswrapper[4687]: I1125 10:13:39.514437 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-c8494b867-8fmmw_90df3ed8-0bc0-4a26-940d-13dd51fd575a/barbican-worker/0.log" Nov 25 10:13:39 crc kubenswrapper[4687]: I1125 10:13:39.520774 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-c8494b867-8fmmw_90df3ed8-0bc0-4a26-940d-13dd51fd575a/barbican-worker-log/0.log" Nov 25 10:13:39 crc kubenswrapper[4687]: I1125 10:13:39.679430 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-r99jg_34a409ae-58d8-4746-83e8-f93d0e449216/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:13:39 crc kubenswrapper[4687]: I1125 10:13:39.790661 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_14bf5e9a-1354-4b0d-a475-2d3de20a07fd/ceilometer-central-agent/0.log" Nov 25 10:13:39 crc kubenswrapper[4687]: I1125 10:13:39.893231 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_14bf5e9a-1354-4b0d-a475-2d3de20a07fd/ceilometer-notification-agent/0.log" Nov 25 10:13:40 crc kubenswrapper[4687]: I1125 10:13:40.005719 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_14bf5e9a-1354-4b0d-a475-2d3de20a07fd/proxy-httpd/0.log" Nov 25 10:13:40 crc kubenswrapper[4687]: I1125 10:13:40.114681 4687 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ceilometer-0_14bf5e9a-1354-4b0d-a475-2d3de20a07fd/sg-core/0.log" Nov 25 10:13:40 crc kubenswrapper[4687]: I1125 10:13:40.191243 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_bc9b51e3-3417-4e5d-86f6-2322c956f540/cinder-api/0.log" Nov 25 10:13:40 crc kubenswrapper[4687]: I1125 10:13:40.226812 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_bc9b51e3-3417-4e5d-86f6-2322c956f540/cinder-api-log/0.log" Nov 25 10:13:40 crc kubenswrapper[4687]: I1125 10:13:40.407079 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_48978802-f2a2-41ad-bc63-e71b66b0747f/cinder-scheduler/0.log" Nov 25 10:13:40 crc kubenswrapper[4687]: I1125 10:13:40.417961 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_48978802-f2a2-41ad-bc63-e71b66b0747f/probe/0.log" Nov 25 10:13:40 crc kubenswrapper[4687]: I1125 10:13:40.586567 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-c6vdr_d0a8b457-e5a2-4516-ab72-a2bf93e2b8bc/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:13:40 crc kubenswrapper[4687]: I1125 10:13:40.672479 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-tp8mr_906f3fa0-c7e9-40dc-a876-0e0c9cdbc272/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:13:40 crc kubenswrapper[4687]: I1125 10:13:40.782419 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-bjn64_68d33f41-95f3-41f3-847a-76b368d367cd/init/0.log" Nov 25 10:13:40 crc kubenswrapper[4687]: I1125 10:13:40.985295 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-bjn64_68d33f41-95f3-41f3-847a-76b368d367cd/init/0.log" Nov 25 10:13:41 crc kubenswrapper[4687]: I1125 10:13:41.133826 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-z6m94_c2600c05-1335-45a5-b7d3-4bfd661d7884/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:13:41 crc kubenswrapper[4687]: I1125 10:13:41.142296 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-bjn64_68d33f41-95f3-41f3-847a-76b368d367cd/dnsmasq-dns/0.log" Nov 25 10:13:41 crc kubenswrapper[4687]: I1125 10:13:41.330984 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_603378ac-d3a5-43ec-bd0f-b4237683f553/glance-httpd/0.log" Nov 25 10:13:41 crc kubenswrapper[4687]: I1125 10:13:41.370826 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_603378ac-d3a5-43ec-bd0f-b4237683f553/glance-log/0.log" Nov 25 10:13:41 crc kubenswrapper[4687]: I1125 10:13:41.486337 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_255349d3-1260-430d-a74a-2fa4027d92b5/glance-httpd/0.log" Nov 25 10:13:41 crc kubenswrapper[4687]: I1125 10:13:41.534990 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_255349d3-1260-430d-a74a-2fa4027d92b5/glance-log/0.log" Nov 25 10:13:42 crc kubenswrapper[4687]: I1125 10:13:42.066113 4687 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_horizon-69b7bcc78d-r6t7q_e4c7abdb-6d41-42c3-a228-27ebd825e7b5/horizon/0.log" Nov 25 10:13:42 crc kubenswrapper[4687]: I1125 10:13:42.216602 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-k8fdd_04071c43-9814-4c88-bd7e-f3b1c83f9dfc/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:13:42 crc kubenswrapper[4687]: I1125 10:13:42.314931 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-69b7bcc78d-r6t7q_e4c7abdb-6d41-42c3-a228-27ebd825e7b5/horizon-log/0.log" Nov 25 10:13:42 crc kubenswrapper[4687]: I1125 10:13:42.339859 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-jh85t_29a959b9-db17-40b5-8c9b-f54bc3548ca2/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:13:42 crc kubenswrapper[4687]: I1125 10:13:42.648083 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-6cd548ffc8-p78fk_f8e2361f-7cd6-4055-8e0d-a53eda846c23/keystone-api/0.log" Nov 25 10:13:42 crc kubenswrapper[4687]: I1125 10:13:42.654824 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29401081-f9gz7_0fce13d2-b073-4d13-ae67-6a4a079ae3f1/keystone-cron/0.log" Nov 25 10:13:42 crc kubenswrapper[4687]: I1125 10:13:42.881258 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_20082470-4513-4042-8a66-3117b8a387f4/kube-state-metrics/0.log" Nov 25 10:13:42 crc kubenswrapper[4687]: I1125 10:13:42.908411 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-6dpqn_7cf72d64-3a5f-42c4-a290-2244169a8a60/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:13:43 crc kubenswrapper[4687]: I1125 10:13:43.316334 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-fdc69b5cc-jz28l_e23cf6de-4d7f-40f1-aac9-a397b1c8bb36/neutron-httpd/0.log" Nov 25 10:13:43 crc kubenswrapper[4687]: I1125 10:13:43.357448 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-fdc69b5cc-jz28l_e23cf6de-4d7f-40f1-aac9-a397b1c8bb36/neutron-api/0.log" Nov 25 10:13:43 crc kubenswrapper[4687]: I1125 10:13:43.487035 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-78c56_ec0b61ae-ccac-473b-ab43-e21daf1c348e/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:13:44 crc kubenswrapper[4687]: I1125 10:13:44.021605 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_8ef7fc72-708b-4994-9ced-44ec353121fc/nova-api-log/0.log" Nov 25 10:13:44 crc kubenswrapper[4687]: I1125 10:13:44.068429 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_e891aa7c-cb45-432d-9a15-1194e9700272/nova-cell0-conductor-conductor/0.log" Nov 25 10:13:44 crc kubenswrapper[4687]: I1125 10:13:44.407887 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_4f859978-9653-48fd-9c45-a2eb11561c0d/nova-cell1-conductor-conductor/0.log" Nov 25 10:13:44 crc kubenswrapper[4687]: I1125 10:13:44.430039 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_8ef7fc72-708b-4994-9ced-44ec353121fc/nova-api-api/0.log" Nov 25 10:13:44 crc kubenswrapper[4687]: I1125 10:13:44.459464 4687 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_nova-cell1-novncproxy-0_059b7fdf-9ca4-4f03-afa0-ee554a6aa858/nova-cell1-novncproxy-novncproxy/0.log" Nov 25 10:13:45 crc kubenswrapper[4687]: I1125 10:13:45.373442 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-lvpfx_74866639-8460-4684-afe9-2e19c59db722/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:13:45 crc kubenswrapper[4687]: I1125 10:13:45.379165 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_28054f13-f14d-47dd-a07f-2e56cd710565/nova-metadata-log/0.log" Nov 25 10:13:45 crc kubenswrapper[4687]: I1125 10:13:45.835923 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_b1c0236e-917b-4c65-a9b7-6d3508c1f4a8/mysql-bootstrap/0.log" Nov 25 10:13:45 crc kubenswrapper[4687]: I1125 10:13:45.888323 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_9e4aadac-caf5-4702-98c7-648843339aa5/nova-scheduler-scheduler/0.log" Nov 25 10:13:46 crc kubenswrapper[4687]: I1125 10:13:46.054045 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_b1c0236e-917b-4c65-a9b7-6d3508c1f4a8/mysql-bootstrap/0.log" Nov 25 10:13:46 crc kubenswrapper[4687]: I1125 10:13:46.102652 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_b1c0236e-917b-4c65-a9b7-6d3508c1f4a8/galera/0.log" Nov 25 10:13:46 crc kubenswrapper[4687]: I1125 10:13:46.264652 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7/mysql-bootstrap/0.log" Nov 25 10:13:46 crc kubenswrapper[4687]: I1125 10:13:46.649076 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_28054f13-f14d-47dd-a07f-2e56cd710565/nova-metadata-metadata/0.log" Nov 25 10:13:46 crc kubenswrapper[4687]: I1125 10:13:46.832033 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7/mysql-bootstrap/0.log" Nov 25 10:13:46 crc kubenswrapper[4687]: I1125 10:13:46.902675 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_f9eb7eb8-0cce-4eaa-b2b1-7b1fec140cd7/galera/0.log" Nov 25 10:13:46 crc kubenswrapper[4687]: I1125 10:13:46.965117 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_f5dea615-cb9c-48fc-a557-9d8fbac041ac/openstackclient/0.log" Nov 25 10:13:47 crc kubenswrapper[4687]: I1125 10:13:47.165843 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-g5gzm_b9a1cd27-5b10-422d-9629-a5a6c0bc128a/openstack-network-exporter/0.log" Nov 25 10:13:47 crc kubenswrapper[4687]: I1125 10:13:47.225278 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-fdpzn_6894bad0-9f1e-4d44-89a3-b06c6b24495a/ovn-controller/0.log" Nov 25 10:13:47 crc kubenswrapper[4687]: I1125 10:13:47.458669 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hstvx_ec3fcef6-578a-4687-9af6-18d6de32f1e1/ovsdb-server-init/0.log" Nov 25 10:13:47 crc kubenswrapper[4687]: I1125 10:13:47.583721 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hstvx_ec3fcef6-578a-4687-9af6-18d6de32f1e1/ovs-vswitchd/0.log" Nov 25 10:13:47 crc kubenswrapper[4687]: I1125 10:13:47.589841 4687 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hstvx_ec3fcef6-578a-4687-9af6-18d6de32f1e1/ovsdb-server/0.log" Nov 25 10:13:47 crc kubenswrapper[4687]: I1125 10:13:47.637621 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hstvx_ec3fcef6-578a-4687-9af6-18d6de32f1e1/ovsdb-server-init/0.log" Nov 25 10:13:47 crc kubenswrapper[4687]: I1125 10:13:47.840686 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_4e2efef0-3880-4d7a-bd93-59b596e470b8/openstack-network-exporter/0.log" Nov 25 10:13:47 crc kubenswrapper[4687]: I1125 10:13:47.851375 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-wwsqh_db45ca10-ced6-46c4-84e8-ac525cd596b4/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:13:47 crc kubenswrapper[4687]: I1125 10:13:47.884121 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_4e2efef0-3880-4d7a-bd93-59b596e470b8/ovn-northd/0.log" Nov 25 10:13:48 crc kubenswrapper[4687]: I1125 10:13:48.114210 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_9d20bc24-507c-4712-8c05-c8d3cfd4e87f/ovsdbserver-nb/0.log" Nov 25 10:13:48 crc kubenswrapper[4687]: I1125 10:13:48.138781 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_9d20bc24-507c-4712-8c05-c8d3cfd4e87f/openstack-network-exporter/0.log" Nov 25 10:13:48 crc kubenswrapper[4687]: I1125 10:13:48.305088 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_ae74a803-c417-4ab8-8842-20a575b77dd3/openstack-network-exporter/0.log" Nov 25 10:13:48 crc kubenswrapper[4687]: I1125 10:13:48.355992 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_ae74a803-c417-4ab8-8842-20a575b77dd3/ovsdbserver-sb/0.log" Nov 25 10:13:48 crc kubenswrapper[4687]: I1125 10:13:48.772936 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5bf794b984-bbcp5_ca801008-2024-4b8d-a69b-2f468a78f1a1/placement-api/0.log" Nov 25 10:13:48 crc kubenswrapper[4687]: I1125 10:13:48.784869 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_0f0fb06f-00e1-471a-855b-88f34608ca01/setup-container/0.log" Nov 25 10:13:48 crc kubenswrapper[4687]: I1125 10:13:48.787900 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5bf794b984-bbcp5_ca801008-2024-4b8d-a69b-2f468a78f1a1/placement-log/0.log" Nov 25 10:13:49 crc kubenswrapper[4687]: I1125 10:13:49.054930 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_0f0fb06f-00e1-471a-855b-88f34608ca01/setup-container/0.log" Nov 25 10:13:49 crc kubenswrapper[4687]: I1125 10:13:49.063440 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_0f0fb06f-00e1-471a-855b-88f34608ca01/rabbitmq/0.log" Nov 25 10:13:49 crc kubenswrapper[4687]: I1125 10:13:49.101692 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_244d6f11-290e-4cbe-95b7-04b7555090a9/setup-container/0.log" Nov 25 10:13:49 crc kubenswrapper[4687]: I1125 10:13:49.282335 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_244d6f11-290e-4cbe-95b7-04b7555090a9/setup-container/0.log" Nov 25 10:13:49 crc kubenswrapper[4687]: I1125 10:13:49.341070 4687 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_rabbitmq-server-0_244d6f11-290e-4cbe-95b7-04b7555090a9/rabbitmq/0.log" Nov 25 10:13:49 crc kubenswrapper[4687]: I1125 10:13:49.392098 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-7kh68_9896502d-3c95-47e0-b75a-855221a19ebc/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:13:49 crc kubenswrapper[4687]: I1125 10:13:49.562608 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-xp2dr_0d29aa5d-d14b-4a11-9929-84c1770afb05/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:13:49 crc kubenswrapper[4687]: I1125 10:13:49.586641 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-nb7kk_59cc9836-1eba-484b-9c23-78e3368be44c/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:13:49 crc kubenswrapper[4687]: I1125 10:13:49.805518 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-hcd94_84e0ed5c-dd14-40ef-bc65-5066ae662f34/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:13:50 crc kubenswrapper[4687]: I1125 10:13:50.136826 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6cc97f684c-lcsst_e4a5addf-9956-45b0-b761-affcce71a048/proxy-server/0.log" Nov 25 10:13:50 crc kubenswrapper[4687]: I1125 10:13:50.137702 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-c42m9_a2095b70-3311-4f0a-a052-5c8f686fd304/ssh-known-hosts-edpm-deployment/0.log" Nov 25 10:13:50 crc kubenswrapper[4687]: I1125 10:13:50.174779 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6cc97f684c-lcsst_e4a5addf-9956-45b0-b761-affcce71a048/proxy-httpd/0.log" Nov 25 10:13:50 crc kubenswrapper[4687]: I1125 10:13:50.364566 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-7x797_aee1a7b3-633b-455a-903a-7b00ef90ea07/swift-ring-rebalance/0.log" Nov 25 10:13:50 crc kubenswrapper[4687]: I1125 10:13:50.456886 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/account-auditor/0.log" Nov 25 10:13:50 crc kubenswrapper[4687]: I1125 10:13:50.482590 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/account-reaper/0.log" Nov 25 10:13:50 crc kubenswrapper[4687]: I1125 10:13:50.597479 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/account-replicator/0.log" Nov 25 10:13:50 crc kubenswrapper[4687]: I1125 10:13:50.655111 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/account-server/0.log" Nov 25 10:13:50 crc kubenswrapper[4687]: I1125 10:13:50.703584 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/container-auditor/0.log" Nov 25 10:13:50 crc kubenswrapper[4687]: I1125 10:13:50.764173 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/container-replicator/0.log" Nov 25 10:13:50 crc kubenswrapper[4687]: I1125 10:13:50.850659 4687 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/container-server/0.log" Nov 25 10:13:50 crc kubenswrapper[4687]: I1125 10:13:50.879067 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/container-updater/0.log" Nov 25 10:13:50 crc kubenswrapper[4687]: I1125 10:13:50.910578 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/object-auditor/0.log" Nov 25 10:13:51 crc kubenswrapper[4687]: I1125 10:13:51.024259 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/object-expirer/0.log" Nov 25 10:13:51 crc kubenswrapper[4687]: I1125 10:13:51.071294 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/object-replicator/0.log" Nov 25 10:13:51 crc kubenswrapper[4687]: I1125 10:13:51.098441 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/object-server/0.log" Nov 25 10:13:51 crc kubenswrapper[4687]: I1125 10:13:51.148216 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/object-updater/0.log" Nov 25 10:13:51 crc kubenswrapper[4687]: I1125 10:13:51.253167 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/rsync/0.log" Nov 25 10:13:51 crc kubenswrapper[4687]: I1125 10:13:51.296537 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f6a5d97-063d-47e3-b049-fd2b9b46ee77/swift-recon-cron/0.log" Nov 25 10:13:51 crc kubenswrapper[4687]: I1125 10:13:51.338349 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-jfmbq_cc7503d0-7742-479f-94f2-d2fbffd48809/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:13:51 crc kubenswrapper[4687]: I1125 10:13:51.518078 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_f60c7882-f90a-4cfd-93a4-1cf51c29315a/tempest-tests-tempest-tests-runner/0.log" Nov 25 10:13:51 crc kubenswrapper[4687]: I1125 10:13:51.607360 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_b6d8db87-1023-4e20-99ca-6a755dc19fe3/test-operator-logs-container/0.log" Nov 25 10:13:51 crc kubenswrapper[4687]: I1125 10:13:51.776447 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-wnpct_ba70f059-0179-41d0-b0fe-2f0e0b4db2a7/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 10:14:01 crc kubenswrapper[4687]: I1125 10:14:01.265681 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_8b16fcc2-1dd1-47d5-979a-f50611173736/memcached/0.log" Nov 25 10:14:17 crc kubenswrapper[4687]: I1125 10:14:17.649000 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb_989d467f-e529-48b1-ac6b-f8509d9ae3f8/util/0.log" Nov 25 10:14:17 crc kubenswrapper[4687]: I1125 10:14:17.854846 4687 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb_989d467f-e529-48b1-ac6b-f8509d9ae3f8/pull/0.log" Nov 25 10:14:17 crc kubenswrapper[4687]: I1125 10:14:17.879103 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb_989d467f-e529-48b1-ac6b-f8509d9ae3f8/pull/0.log" Nov 25 10:14:18 crc kubenswrapper[4687]: I1125 10:14:18.111257 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb_989d467f-e529-48b1-ac6b-f8509d9ae3f8/extract/0.log" Nov 25 10:14:18 crc kubenswrapper[4687]: I1125 10:14:18.765236 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb_989d467f-e529-48b1-ac6b-f8509d9ae3f8/util/0.log" Nov 25 10:14:18 crc kubenswrapper[4687]: I1125 10:14:18.775671 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb_989d467f-e529-48b1-ac6b-f8509d9ae3f8/util/0.log" Nov 25 10:14:18 crc kubenswrapper[4687]: I1125 10:14:18.789952 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_4b286b9d309b00a43da2b756334a22622337dc26dbd009d14536f04fd9fd8zb_989d467f-e529-48b1-ac6b-f8509d9ae3f8/pull/0.log" Nov 25 10:14:18 crc kubenswrapper[4687]: I1125 10:14:18.891377 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-86dc4d89c8-4dn9x_a5f5d45b-b0ce-48f8-892e-02571e1f9f24/kube-rbac-proxy/0.log" Nov 25 10:14:18 crc kubenswrapper[4687]: I1125 10:14:18.968801 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-79856dc55c-ckt6f_26dc2622-a74f-405c-9bbb-291adb145908/kube-rbac-proxy/0.log" Nov 25 10:14:18 crc kubenswrapper[4687]: I1125 10:14:18.974852 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-86dc4d89c8-4dn9x_a5f5d45b-b0ce-48f8-892e-02571e1f9f24/manager/0.log" Nov 25 10:14:19 crc kubenswrapper[4687]: I1125 10:14:19.138743 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-79856dc55c-ckt6f_26dc2622-a74f-405c-9bbb-291adb145908/manager/0.log" Nov 25 10:14:19 crc kubenswrapper[4687]: I1125 10:14:19.171672 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d695c9b56-jcrff_4d75764d-49d9-4482-98a9-728dd977f2bd/manager/0.log" Nov 25 10:14:19 crc kubenswrapper[4687]: I1125 10:14:19.173626 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d695c9b56-jcrff_4d75764d-49d9-4482-98a9-728dd977f2bd/kube-rbac-proxy/0.log" Nov 25 10:14:19 crc kubenswrapper[4687]: I1125 10:14:19.325382 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-68b95954c9-wxt5h_ed7a2e30-6110-4b1c-864f-4856c4c0ec8a/kube-rbac-proxy/0.log" Nov 25 10:14:19 crc kubenswrapper[4687]: I1125 10:14:19.426384 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-68b95954c9-wxt5h_ed7a2e30-6110-4b1c-864f-4856c4c0ec8a/manager/0.log" Nov 25 10:14:19 crc kubenswrapper[4687]: I1125 
10:14:19.514239 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-774b86978c-zwv7l_9c7f6da9-8178-4c3f-b565-9f6eca26c6c7/manager/0.log" Nov 25 10:14:19 crc kubenswrapper[4687]: I1125 10:14:19.534915 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-774b86978c-zwv7l_9c7f6da9-8178-4c3f-b565-9f6eca26c6c7/kube-rbac-proxy/0.log" Nov 25 10:14:19 crc kubenswrapper[4687]: I1125 10:14:19.650708 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c9694994-lsr97_956b1e07-e4b1-44cf-9990-ae928a3e11c7/kube-rbac-proxy/0.log" Nov 25 10:14:19 crc kubenswrapper[4687]: I1125 10:14:19.721578 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c9694994-lsr97_956b1e07-e4b1-44cf-9990-ae928a3e11c7/manager/0.log" Nov 25 10:14:19 crc kubenswrapper[4687]: I1125 10:14:19.770007 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-d5cc86f4b-w2vc9_0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14/kube-rbac-proxy/0.log" Nov 25 10:14:20 crc kubenswrapper[4687]: I1125 10:14:20.005485 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-d5cc86f4b-w2vc9_0f8d48bb-bbe7-4dd5-9a0c-5d5f769ebc14/manager/0.log" Nov 25 10:14:20 crc kubenswrapper[4687]: I1125 10:14:20.005641 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5bfcdc958c-7r5w5_7c015be6-1e7f-404b-9ea0-31cbec410081/kube-rbac-proxy/0.log" Nov 25 10:14:20 crc kubenswrapper[4687]: I1125 10:14:20.037977 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5bfcdc958c-7r5w5_7c015be6-1e7f-404b-9ea0-31cbec410081/manager/0.log" Nov 25 10:14:20 crc kubenswrapper[4687]: I1125 10:14:20.173069 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-748dc6576f-khvhw_dc4b5a7c-5e58-42a7-b1ee-676268f99e21/kube-rbac-proxy/0.log" Nov 25 10:14:20 crc kubenswrapper[4687]: I1125 10:14:20.463839 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-748dc6576f-khvhw_dc4b5a7c-5e58-42a7-b1ee-676268f99e21/manager/0.log" Nov 25 10:14:20 crc kubenswrapper[4687]: I1125 10:14:20.489136 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58bb8d67cc-cf25t_469145fd-b998-4c0a-b356-508c4940f78b/kube-rbac-proxy/0.log" Nov 25 10:14:20 crc kubenswrapper[4687]: I1125 10:14:20.632489 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c4fdb7-qfcjj_b07b54a0-d4b4-49e3-bd03-810eeefa6fa7/kube-rbac-proxy/0.log" Nov 25 10:14:20 crc kubenswrapper[4687]: I1125 10:14:20.662445 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58bb8d67cc-cf25t_469145fd-b998-4c0a-b356-508c4940f78b/manager/0.log" Nov 25 10:14:20 crc kubenswrapper[4687]: I1125 10:14:20.740231 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c4fdb7-qfcjj_b07b54a0-d4b4-49e3-bd03-810eeefa6fa7/manager/0.log" Nov 25 10:14:20 crc 
kubenswrapper[4687]: I1125 10:14:20.868155 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7c57c8bbc4-qzdnw_555e5cf5-f2f8-46f2-ab17-8589c7391fc8/kube-rbac-proxy/0.log" Nov 25 10:14:20 crc kubenswrapper[4687]: I1125 10:14:20.908494 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7c57c8bbc4-qzdnw_555e5cf5-f2f8-46f2-ab17-8589c7391fc8/manager/0.log" Nov 25 10:14:21 crc kubenswrapper[4687]: I1125 10:14:21.088384 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-99k8n_62a14c39-245f-4c8d-84b5-b23d023d810f/kube-rbac-proxy/0.log" Nov 25 10:14:21 crc kubenswrapper[4687]: I1125 10:14:21.163523 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-99k8n_62a14c39-245f-4c8d-84b5-b23d023d810f/manager/0.log" Nov 25 10:14:21 crc kubenswrapper[4687]: I1125 10:14:21.275101 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-fd75fd47d-z59ft_78edbfb4-5838-4c2d-a4e3-e1512bb55654/kube-rbac-proxy/0.log" Nov 25 10:14:21 crc kubenswrapper[4687]: I1125 10:14:21.312102 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-fd75fd47d-z59ft_78edbfb4-5838-4c2d-a4e3-e1512bb55654/manager/0.log" Nov 25 10:14:21 crc kubenswrapper[4687]: I1125 10:14:21.380142 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5_9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be/kube-rbac-proxy/0.log" Nov 25 10:14:21 crc kubenswrapper[4687]: I1125 10:14:21.398395 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-544b9bb9-5dhv5_9ceb9cae-7f5d-4bfd-892d-a3eca9d5f0be/manager/0.log" Nov 25 10:14:21 crc kubenswrapper[4687]: I1125 10:14:21.732997 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5888c99dff-lmgz9_35a98c5c-b3b4-4e95-821d-923a693b67e0/operator/0.log" Nov 25 10:14:21 crc kubenswrapper[4687]: I1125 10:14:21.768167 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-xg64b_fabb562b-35a6-4e1d-bdd5-5357491f9ad6/registry-server/0.log" Nov 25 10:14:21 crc kubenswrapper[4687]: I1125 10:14:21.868830 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-66cf5c67ff-c8plr_be89cce1-89d8-47da-b777-f7805762b230/kube-rbac-proxy/0.log" Nov 25 10:14:21 crc kubenswrapper[4687]: I1125 10:14:21.970775 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-66cf5c67ff-c8plr_be89cce1-89d8-47da-b777-f7805762b230/manager/0.log" Nov 25 10:14:22 crc kubenswrapper[4687]: I1125 10:14:22.005888 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5db546f9d9-qsrlz_99367bef-5882-4884-8fe5-9a3ff8edd1cb/kube-rbac-proxy/0.log" Nov 25 10:14:22 crc kubenswrapper[4687]: I1125 10:14:22.078317 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5db546f9d9-qsrlz_99367bef-5882-4884-8fe5-9a3ff8edd1cb/manager/0.log" 
Nov 25 10:14:22 crc kubenswrapper[4687]: I1125 10:14:22.265175 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-6lbv2_5a4bd509-a298-4fff-845b-262a41634134/operator/0.log" Nov 25 10:14:22 crc kubenswrapper[4687]: I1125 10:14:22.335799 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-6fdc4fcf86-pqd76_ac5487a2-ce65-4034-973b-b939494aef63/kube-rbac-proxy/0.log" Nov 25 10:14:22 crc kubenswrapper[4687]: I1125 10:14:22.415206 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-6fdc4fcf86-pqd76_ac5487a2-ce65-4034-973b-b939494aef63/manager/0.log" Nov 25 10:14:22 crc kubenswrapper[4687]: I1125 10:14:22.504706 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-567f98c9d-s9vtg_4a1d4849-2906-4fd5-b54e-7f2e567f05ef/kube-rbac-proxy/0.log" Nov 25 10:14:22 crc kubenswrapper[4687]: I1125 10:14:22.645997 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-567f98c9d-s9vtg_4a1d4849-2906-4fd5-b54e-7f2e567f05ef/manager/0.log" Nov 25 10:14:22 crc kubenswrapper[4687]: I1125 10:14:22.865308 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-d664976d5-hdtkm_72c3a2af-6e0e-4862-b638-2694a71f1e5a/manager/0.log" Nov 25 10:14:23 crc kubenswrapper[4687]: I1125 10:14:23.338963 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cb74df96-7jmgj_9fac1200-5b4d-4032-98aa-d293d13fdcc7/kube-rbac-proxy/0.log" Nov 25 10:14:23 crc kubenswrapper[4687]: I1125 10:14:23.383430 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cb74df96-7jmgj_9fac1200-5b4d-4032-98aa-d293d13fdcc7/manager/0.log" Nov 25 10:14:23 crc kubenswrapper[4687]: I1125 10:14:23.391126 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-864885998-ll9p9_c2ea5569-33b4-403d-9303-770ec432f4cc/kube-rbac-proxy/0.log" Nov 25 10:14:23 crc kubenswrapper[4687]: I1125 10:14:23.535366 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-864885998-ll9p9_c2ea5569-33b4-403d-9303-770ec432f4cc/manager/0.log" Nov 25 10:14:25 crc kubenswrapper[4687]: I1125 10:14:25.804924 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-s8jkc"] Nov 25 10:14:25 crc kubenswrapper[4687]: E1125 10:14:25.805593 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92b97828-9989-4f8a-af1e-8ab5885892c2" containerName="container-00" Nov 25 10:14:25 crc kubenswrapper[4687]: I1125 10:14:25.805606 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="92b97828-9989-4f8a-af1e-8ab5885892c2" containerName="container-00" Nov 25 10:14:25 crc kubenswrapper[4687]: E1125 10:14:25.805640 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="604532e7-b921-4731-abe4-47f29f720b99" containerName="extract-content" Nov 25 10:14:25 crc kubenswrapper[4687]: I1125 10:14:25.805651 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="604532e7-b921-4731-abe4-47f29f720b99" containerName="extract-content" Nov 25 10:14:25 crc kubenswrapper[4687]: E1125 
10:14:25.805669 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="604532e7-b921-4731-abe4-47f29f720b99" containerName="registry-server"
Nov 25 10:14:25 crc kubenswrapper[4687]: I1125 10:14:25.805677 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="604532e7-b921-4731-abe4-47f29f720b99" containerName="registry-server"
Nov 25 10:14:25 crc kubenswrapper[4687]: E1125 10:14:25.805689 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="604532e7-b921-4731-abe4-47f29f720b99" containerName="extract-utilities"
Nov 25 10:14:25 crc kubenswrapper[4687]: I1125 10:14:25.805695 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="604532e7-b921-4731-abe4-47f29f720b99" containerName="extract-utilities"
Nov 25 10:14:25 crc kubenswrapper[4687]: I1125 10:14:25.805937 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="604532e7-b921-4731-abe4-47f29f720b99" containerName="registry-server"
Nov 25 10:14:25 crc kubenswrapper[4687]: I1125 10:14:25.806009 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="92b97828-9989-4f8a-af1e-8ab5885892c2" containerName="container-00"
Nov 25 10:14:25 crc kubenswrapper[4687]: I1125 10:14:25.807675 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-s8jkc"
Nov 25 10:14:25 crc kubenswrapper[4687]: I1125 10:14:25.816964 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-s8jkc"]
Nov 25 10:14:25 crc kubenswrapper[4687]: I1125 10:14:25.897306 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwmhd\" (UniqueName: \"kubernetes.io/projected/4898fc6b-3556-4c62-93df-f94a4517940c-kube-api-access-wwmhd\") pod \"redhat-operators-s8jkc\" (UID: \"4898fc6b-3556-4c62-93df-f94a4517940c\") " pod="openshift-marketplace/redhat-operators-s8jkc"
Nov 25 10:14:25 crc kubenswrapper[4687]: I1125 10:14:25.897378 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4898fc6b-3556-4c62-93df-f94a4517940c-catalog-content\") pod \"redhat-operators-s8jkc\" (UID: \"4898fc6b-3556-4c62-93df-f94a4517940c\") " pod="openshift-marketplace/redhat-operators-s8jkc"
Nov 25 10:14:25 crc kubenswrapper[4687]: I1125 10:14:25.897415 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4898fc6b-3556-4c62-93df-f94a4517940c-utilities\") pod \"redhat-operators-s8jkc\" (UID: \"4898fc6b-3556-4c62-93df-f94a4517940c\") " pod="openshift-marketplace/redhat-operators-s8jkc"
Nov 25 10:14:26 crc kubenswrapper[4687]: I1125 10:14:25.999535 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwmhd\" (UniqueName: \"kubernetes.io/projected/4898fc6b-3556-4c62-93df-f94a4517940c-kube-api-access-wwmhd\") pod \"redhat-operators-s8jkc\" (UID: \"4898fc6b-3556-4c62-93df-f94a4517940c\") " pod="openshift-marketplace/redhat-operators-s8jkc"
Nov 25 10:14:26 crc kubenswrapper[4687]: I1125 10:14:25.999604 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4898fc6b-3556-4c62-93df-f94a4517940c-catalog-content\") pod \"redhat-operators-s8jkc\" (UID: \"4898fc6b-3556-4c62-93df-f94a4517940c\") " pod="openshift-marketplace/redhat-operators-s8jkc"
Nov 25 10:14:26 crc kubenswrapper[4687]: I1125 10:14:25.999630 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4898fc6b-3556-4c62-93df-f94a4517940c-utilities\") pod \"redhat-operators-s8jkc\" (UID: \"4898fc6b-3556-4c62-93df-f94a4517940c\") " pod="openshift-marketplace/redhat-operators-s8jkc"
Nov 25 10:14:26 crc kubenswrapper[4687]: I1125 10:14:26.000080 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4898fc6b-3556-4c62-93df-f94a4517940c-utilities\") pod \"redhat-operators-s8jkc\" (UID: \"4898fc6b-3556-4c62-93df-f94a4517940c\") " pod="openshift-marketplace/redhat-operators-s8jkc"
Nov 25 10:14:26 crc kubenswrapper[4687]: I1125 10:14:26.000599 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4898fc6b-3556-4c62-93df-f94a4517940c-catalog-content\") pod \"redhat-operators-s8jkc\" (UID: \"4898fc6b-3556-4c62-93df-f94a4517940c\") " pod="openshift-marketplace/redhat-operators-s8jkc"
Nov 25 10:14:26 crc kubenswrapper[4687]: I1125 10:14:26.035555 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwmhd\" (UniqueName: \"kubernetes.io/projected/4898fc6b-3556-4c62-93df-f94a4517940c-kube-api-access-wwmhd\") pod \"redhat-operators-s8jkc\" (UID: \"4898fc6b-3556-4c62-93df-f94a4517940c\") " pod="openshift-marketplace/redhat-operators-s8jkc"
Nov 25 10:14:26 crc kubenswrapper[4687]: I1125 10:14:26.127812 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-s8jkc"
Nov 25 10:14:26 crc kubenswrapper[4687]: I1125 10:14:26.669024 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-s8jkc"]
Nov 25 10:14:27 crc kubenswrapper[4687]: I1125 10:14:27.216343 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s8jkc" event={"ID":"4898fc6b-3556-4c62-93df-f94a4517940c","Type":"ContainerStarted","Data":"af291e86f4c97bd95673d46d02f2344684b11103e054d76dfc406cdf2ba3724c"}
Nov 25 10:14:28 crc kubenswrapper[4687]: I1125 10:14:28.229364 4687 generic.go:334] "Generic (PLEG): container finished" podID="4898fc6b-3556-4c62-93df-f94a4517940c" containerID="cd0e8ad9087afd630b55cf91a3e6cb38b4f149594fdfd7ab506ddea958e76ec9" exitCode=0
Nov 25 10:14:28 crc kubenswrapper[4687]: I1125 10:14:28.229467 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s8jkc" event={"ID":"4898fc6b-3556-4c62-93df-f94a4517940c","Type":"ContainerDied","Data":"cd0e8ad9087afd630b55cf91a3e6cb38b4f149594fdfd7ab506ddea958e76ec9"}
Nov 25 10:14:30 crc kubenswrapper[4687]: I1125 10:14:30.251370 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s8jkc" event={"ID":"4898fc6b-3556-4c62-93df-f94a4517940c","Type":"ContainerStarted","Data":"e3f036923fd8b6a589ff39ad1e74c29c4263b1e63ac6c1e212aab5b190af6139"}
Nov 25 10:14:31 crc kubenswrapper[4687]: I1125 10:14:31.266954 4687 generic.go:334] "Generic (PLEG): container finished" podID="4898fc6b-3556-4c62-93df-f94a4517940c" containerID="e3f036923fd8b6a589ff39ad1e74c29c4263b1e63ac6c1e212aab5b190af6139" exitCode=0
Nov 25 10:14:31 crc kubenswrapper[4687]: I1125 10:14:31.267019 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s8jkc" event={"ID":"4898fc6b-3556-4c62-93df-f94a4517940c","Type":"ContainerDied","Data":"e3f036923fd8b6a589ff39ad1e74c29c4263b1e63ac6c1e212aab5b190af6139"}
Nov 25 10:14:36 crc kubenswrapper[4687]: I1125 10:14:36.318828 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s8jkc" event={"ID":"4898fc6b-3556-4c62-93df-f94a4517940c","Type":"ContainerStarted","Data":"61bb8c29381c44c445921fc08a88f220e38ee136efe547e5a49e4dc216a8ab04"}
Nov 25 10:14:37 crc kubenswrapper[4687]: I1125 10:14:37.361376 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-s8jkc" podStartSLOduration=7.804399523 podStartE2EDuration="12.361351621s" podCreationTimestamp="2025-11-25 10:14:25 +0000 UTC" firstStartedPulling="2025-11-25 10:14:28.231261198 +0000 UTC m=+4263.284900916" lastFinishedPulling="2025-11-25 10:14:32.788213296 +0000 UTC m=+4267.841853014" observedRunningTime="2025-11-25 10:14:37.355720218 +0000 UTC m=+4272.409359936" watchObservedRunningTime="2025-11-25 10:14:37.361351621 +0000 UTC m=+4272.414991339"
Nov 25 10:14:44 crc kubenswrapper[4687]: I1125 10:14:44.450670 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-sst2l"]
Nov 25 10:14:44 crc kubenswrapper[4687]: I1125 10:14:44.452987 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sst2l"
Nov 25 10:14:44 crc kubenswrapper[4687]: I1125 10:14:44.465959 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sst2l"]
Nov 25 10:14:44 crc kubenswrapper[4687]: I1125 10:14:44.558844 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5984207d-ea39-4d83-9af5-7299cba9a0c1-catalog-content\") pod \"certified-operators-sst2l\" (UID: \"5984207d-ea39-4d83-9af5-7299cba9a0c1\") " pod="openshift-marketplace/certified-operators-sst2l"
Nov 25 10:14:44 crc kubenswrapper[4687]: I1125 10:14:44.558965 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzfzq\" (UniqueName: \"kubernetes.io/projected/5984207d-ea39-4d83-9af5-7299cba9a0c1-kube-api-access-dzfzq\") pod \"certified-operators-sst2l\" (UID: \"5984207d-ea39-4d83-9af5-7299cba9a0c1\") " pod="openshift-marketplace/certified-operators-sst2l"
Nov 25 10:14:44 crc kubenswrapper[4687]: I1125 10:14:44.559004 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5984207d-ea39-4d83-9af5-7299cba9a0c1-utilities\") pod \"certified-operators-sst2l\" (UID: \"5984207d-ea39-4d83-9af5-7299cba9a0c1\") " pod="openshift-marketplace/certified-operators-sst2l"
Nov 25 10:14:44 crc kubenswrapper[4687]: I1125 10:14:44.660555 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5984207d-ea39-4d83-9af5-7299cba9a0c1-catalog-content\") pod \"certified-operators-sst2l\" (UID: \"5984207d-ea39-4d83-9af5-7299cba9a0c1\") " pod="openshift-marketplace/certified-operators-sst2l"
pod \"certified-operators-sst2l\" (UID: \"5984207d-ea39-4d83-9af5-7299cba9a0c1\") " pod="openshift-marketplace/certified-operators-sst2l" Nov 25 10:14:44 crc kubenswrapper[4687]: I1125 10:14:44.660658 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5984207d-ea39-4d83-9af5-7299cba9a0c1-utilities\") pod \"certified-operators-sst2l\" (UID: \"5984207d-ea39-4d83-9af5-7299cba9a0c1\") " pod="openshift-marketplace/certified-operators-sst2l" Nov 25 10:14:44 crc kubenswrapper[4687]: I1125 10:14:44.661142 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5984207d-ea39-4d83-9af5-7299cba9a0c1-catalog-content\") pod \"certified-operators-sst2l\" (UID: \"5984207d-ea39-4d83-9af5-7299cba9a0c1\") " pod="openshift-marketplace/certified-operators-sst2l" Nov 25 10:14:44 crc kubenswrapper[4687]: I1125 10:14:44.661179 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5984207d-ea39-4d83-9af5-7299cba9a0c1-utilities\") pod \"certified-operators-sst2l\" (UID: \"5984207d-ea39-4d83-9af5-7299cba9a0c1\") " pod="openshift-marketplace/certified-operators-sst2l" Nov 25 10:14:44 crc kubenswrapper[4687]: I1125 10:14:44.681738 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzfzq\" (UniqueName: \"kubernetes.io/projected/5984207d-ea39-4d83-9af5-7299cba9a0c1-kube-api-access-dzfzq\") pod \"certified-operators-sst2l\" (UID: \"5984207d-ea39-4d83-9af5-7299cba9a0c1\") " pod="openshift-marketplace/certified-operators-sst2l" Nov 25 10:14:44 crc kubenswrapper[4687]: I1125 10:14:44.774825 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sst2l" Nov 25 10:14:45 crc kubenswrapper[4687]: I1125 10:14:45.044016 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-g9bl7"] Nov 25 10:14:45 crc kubenswrapper[4687]: I1125 10:14:45.046153 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-g9bl7" Nov 25 10:14:45 crc kubenswrapper[4687]: I1125 10:14:45.064182 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-g9bl7"] Nov 25 10:14:45 crc kubenswrapper[4687]: I1125 10:14:45.168784 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f13ca2d5-16a8-4362-9062-86c12c40ed2f-utilities\") pod \"community-operators-g9bl7\" (UID: \"f13ca2d5-16a8-4362-9062-86c12c40ed2f\") " pod="openshift-marketplace/community-operators-g9bl7" Nov 25 10:14:45 crc kubenswrapper[4687]: I1125 10:14:45.168861 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45r98\" (UniqueName: \"kubernetes.io/projected/f13ca2d5-16a8-4362-9062-86c12c40ed2f-kube-api-access-45r98\") pod \"community-operators-g9bl7\" (UID: \"f13ca2d5-16a8-4362-9062-86c12c40ed2f\") " pod="openshift-marketplace/community-operators-g9bl7" Nov 25 10:14:45 crc kubenswrapper[4687]: I1125 10:14:45.168890 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f13ca2d5-16a8-4362-9062-86c12c40ed2f-catalog-content\") pod \"community-operators-g9bl7\" (UID: \"f13ca2d5-16a8-4362-9062-86c12c40ed2f\") " pod="openshift-marketplace/community-operators-g9bl7" Nov 25 10:14:45 crc kubenswrapper[4687]: I1125 10:14:45.270956 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f13ca2d5-16a8-4362-9062-86c12c40ed2f-utilities\") pod \"community-operators-g9bl7\" (UID: \"f13ca2d5-16a8-4362-9062-86c12c40ed2f\") " pod="openshift-marketplace/community-operators-g9bl7" Nov 25 10:14:45 crc kubenswrapper[4687]: I1125 10:14:45.271252 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45r98\" (UniqueName: \"kubernetes.io/projected/f13ca2d5-16a8-4362-9062-86c12c40ed2f-kube-api-access-45r98\") pod \"community-operators-g9bl7\" (UID: \"f13ca2d5-16a8-4362-9062-86c12c40ed2f\") " pod="openshift-marketplace/community-operators-g9bl7" Nov 25 10:14:45 crc kubenswrapper[4687]: I1125 10:14:45.271420 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f13ca2d5-16a8-4362-9062-86c12c40ed2f-catalog-content\") pod \"community-operators-g9bl7\" (UID: \"f13ca2d5-16a8-4362-9062-86c12c40ed2f\") " pod="openshift-marketplace/community-operators-g9bl7" Nov 25 10:14:45 crc kubenswrapper[4687]: I1125 10:14:45.271604 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f13ca2d5-16a8-4362-9062-86c12c40ed2f-utilities\") pod \"community-operators-g9bl7\" (UID: \"f13ca2d5-16a8-4362-9062-86c12c40ed2f\") " pod="openshift-marketplace/community-operators-g9bl7" Nov 25 10:14:45 crc kubenswrapper[4687]: I1125 10:14:45.271997 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f13ca2d5-16a8-4362-9062-86c12c40ed2f-catalog-content\") pod \"community-operators-g9bl7\" (UID: \"f13ca2d5-16a8-4362-9062-86c12c40ed2f\") " pod="openshift-marketplace/community-operators-g9bl7" Nov 25 10:14:45 crc kubenswrapper[4687]: I1125 10:14:45.294936 4687 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-45r98\" (UniqueName: \"kubernetes.io/projected/f13ca2d5-16a8-4362-9062-86c12c40ed2f-kube-api-access-45r98\") pod \"community-operators-g9bl7\" (UID: \"f13ca2d5-16a8-4362-9062-86c12c40ed2f\") " pod="openshift-marketplace/community-operators-g9bl7" Nov 25 10:14:45 crc kubenswrapper[4687]: I1125 10:14:45.368456 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g9bl7" Nov 25 10:14:45 crc kubenswrapper[4687]: I1125 10:14:45.956752 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sst2l"] Nov 25 10:14:46 crc kubenswrapper[4687]: I1125 10:14:46.059211 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-j4vxg_74dce578-27e9-4dc2-ac45-9019de15d559/control-plane-machine-set-operator/0.log" Nov 25 10:14:46 crc kubenswrapper[4687]: I1125 10:14:46.128593 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-s8jkc" Nov 25 10:14:46 crc kubenswrapper[4687]: I1125 10:14:46.128928 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-s8jkc" Nov 25 10:14:46 crc kubenswrapper[4687]: I1125 10:14:46.156621 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-v9zjk_25aecd46-36d8-4ee9-bae5-4731e91b5e74/kube-rbac-proxy/0.log" Nov 25 10:14:46 crc kubenswrapper[4687]: I1125 10:14:46.181626 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-s8jkc" Nov 25 10:14:46 crc kubenswrapper[4687]: I1125 10:14:46.278716 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-v9zjk_25aecd46-36d8-4ee9-bae5-4731e91b5e74/machine-api-operator/0.log" Nov 25 10:14:46 crc kubenswrapper[4687]: I1125 10:14:46.294083 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-g9bl7"] Nov 25 10:14:46 crc kubenswrapper[4687]: I1125 10:14:46.419493 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g9bl7" event={"ID":"f13ca2d5-16a8-4362-9062-86c12c40ed2f","Type":"ContainerStarted","Data":"5810c05bf62efe970c4453ff1d8a324111a2158892e5188609f3a45f05a6ecff"} Nov 25 10:14:46 crc kubenswrapper[4687]: I1125 10:14:46.420895 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sst2l" event={"ID":"5984207d-ea39-4d83-9af5-7299cba9a0c1","Type":"ContainerStarted","Data":"2c492fe7f0ce8f29c446e6f35091715f40e8c6200678226aeb4d03f7091240a3"} Nov 25 10:14:46 crc kubenswrapper[4687]: I1125 10:14:46.471768 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-s8jkc" Nov 25 10:14:49 crc kubenswrapper[4687]: I1125 10:14:49.475135 4687 generic.go:334] "Generic (PLEG): container finished" podID="5984207d-ea39-4d83-9af5-7299cba9a0c1" containerID="dae0ec58db6ab368a3b6556c740d71c14c45dd647f68bd07f7c1800bc4f6541c" exitCode=0 Nov 25 10:14:49 crc kubenswrapper[4687]: I1125 10:14:49.475218 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sst2l" 
event={"ID":"5984207d-ea39-4d83-9af5-7299cba9a0c1","Type":"ContainerDied","Data":"dae0ec58db6ab368a3b6556c740d71c14c45dd647f68bd07f7c1800bc4f6541c"} Nov 25 10:14:49 crc kubenswrapper[4687]: I1125 10:14:49.477556 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g9bl7" event={"ID":"f13ca2d5-16a8-4362-9062-86c12c40ed2f","Type":"ContainerStarted","Data":"a0c466d911326f80a6f0ae5f38a8ac34d58939573e69a2335b92ad60ca2fd902"} Nov 25 10:14:50 crc kubenswrapper[4687]: I1125 10:14:50.489888 4687 generic.go:334] "Generic (PLEG): container finished" podID="f13ca2d5-16a8-4362-9062-86c12c40ed2f" containerID="a0c466d911326f80a6f0ae5f38a8ac34d58939573e69a2335b92ad60ca2fd902" exitCode=0 Nov 25 10:14:50 crc kubenswrapper[4687]: I1125 10:14:50.489987 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g9bl7" event={"ID":"f13ca2d5-16a8-4362-9062-86c12c40ed2f","Type":"ContainerDied","Data":"a0c466d911326f80a6f0ae5f38a8ac34d58939573e69a2335b92ad60ca2fd902"} Nov 25 10:14:50 crc kubenswrapper[4687]: I1125 10:14:50.837079 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-s8jkc"] Nov 25 10:14:50 crc kubenswrapper[4687]: I1125 10:14:50.837296 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-s8jkc" podUID="4898fc6b-3556-4c62-93df-f94a4517940c" containerName="registry-server" containerID="cri-o://61bb8c29381c44c445921fc08a88f220e38ee136efe547e5a49e4dc216a8ab04" gracePeriod=2 Nov 25 10:14:51 crc kubenswrapper[4687]: I1125 10:14:51.500186 4687 generic.go:334] "Generic (PLEG): container finished" podID="4898fc6b-3556-4c62-93df-f94a4517940c" containerID="61bb8c29381c44c445921fc08a88f220e38ee136efe547e5a49e4dc216a8ab04" exitCode=0 Nov 25 10:14:51 crc kubenswrapper[4687]: I1125 10:14:51.500479 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s8jkc" event={"ID":"4898fc6b-3556-4c62-93df-f94a4517940c","Type":"ContainerDied","Data":"61bb8c29381c44c445921fc08a88f220e38ee136efe547e5a49e4dc216a8ab04"} Nov 25 10:14:52 crc kubenswrapper[4687]: I1125 10:14:52.087219 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-s8jkc" Nov 25 10:14:52 crc kubenswrapper[4687]: I1125 10:14:52.206163 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4898fc6b-3556-4c62-93df-f94a4517940c-utilities\") pod \"4898fc6b-3556-4c62-93df-f94a4517940c\" (UID: \"4898fc6b-3556-4c62-93df-f94a4517940c\") " Nov 25 10:14:52 crc kubenswrapper[4687]: I1125 10:14:52.206260 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4898fc6b-3556-4c62-93df-f94a4517940c-catalog-content\") pod \"4898fc6b-3556-4c62-93df-f94a4517940c\" (UID: \"4898fc6b-3556-4c62-93df-f94a4517940c\") " Nov 25 10:14:52 crc kubenswrapper[4687]: I1125 10:14:52.206431 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwmhd\" (UniqueName: \"kubernetes.io/projected/4898fc6b-3556-4c62-93df-f94a4517940c-kube-api-access-wwmhd\") pod \"4898fc6b-3556-4c62-93df-f94a4517940c\" (UID: \"4898fc6b-3556-4c62-93df-f94a4517940c\") " Nov 25 10:14:52 crc kubenswrapper[4687]: I1125 10:14:52.208371 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4898fc6b-3556-4c62-93df-f94a4517940c-utilities" (OuterVolumeSpecName: "utilities") pod "4898fc6b-3556-4c62-93df-f94a4517940c" (UID: "4898fc6b-3556-4c62-93df-f94a4517940c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:14:52 crc kubenswrapper[4687]: I1125 10:14:52.212975 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4898fc6b-3556-4c62-93df-f94a4517940c-kube-api-access-wwmhd" (OuterVolumeSpecName: "kube-api-access-wwmhd") pod "4898fc6b-3556-4c62-93df-f94a4517940c" (UID: "4898fc6b-3556-4c62-93df-f94a4517940c"). InnerVolumeSpecName "kube-api-access-wwmhd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:14:52 crc kubenswrapper[4687]: I1125 10:14:52.291740 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4898fc6b-3556-4c62-93df-f94a4517940c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4898fc6b-3556-4c62-93df-f94a4517940c" (UID: "4898fc6b-3556-4c62-93df-f94a4517940c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:14:52 crc kubenswrapper[4687]: I1125 10:14:52.308381 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4898fc6b-3556-4c62-93df-f94a4517940c-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:52 crc kubenswrapper[4687]: I1125 10:14:52.308416 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4898fc6b-3556-4c62-93df-f94a4517940c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:52 crc kubenswrapper[4687]: I1125 10:14:52.308429 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwmhd\" (UniqueName: \"kubernetes.io/projected/4898fc6b-3556-4c62-93df-f94a4517940c-kube-api-access-wwmhd\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:52 crc kubenswrapper[4687]: I1125 10:14:52.511142 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s8jkc" event={"ID":"4898fc6b-3556-4c62-93df-f94a4517940c","Type":"ContainerDied","Data":"af291e86f4c97bd95673d46d02f2344684b11103e054d76dfc406cdf2ba3724c"} Nov 25 10:14:52 crc kubenswrapper[4687]: I1125 10:14:52.511470 4687 scope.go:117] "RemoveContainer" containerID="61bb8c29381c44c445921fc08a88f220e38ee136efe547e5a49e4dc216a8ab04" Nov 25 10:14:52 crc kubenswrapper[4687]: I1125 10:14:52.511278 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-s8jkc" Nov 25 10:14:52 crc kubenswrapper[4687]: I1125 10:14:52.534133 4687 scope.go:117] "RemoveContainer" containerID="e3f036923fd8b6a589ff39ad1e74c29c4263b1e63ac6c1e212aab5b190af6139" Nov 25 10:14:52 crc kubenswrapper[4687]: I1125 10:14:52.549419 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-s8jkc"] Nov 25 10:14:52 crc kubenswrapper[4687]: I1125 10:14:52.559577 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-s8jkc"] Nov 25 10:14:52 crc kubenswrapper[4687]: I1125 10:14:52.566071 4687 scope.go:117] "RemoveContainer" containerID="cd0e8ad9087afd630b55cf91a3e6cb38b4f149594fdfd7ab506ddea958e76ec9" Nov 25 10:14:53 crc kubenswrapper[4687]: I1125 10:14:53.523393 4687 generic.go:334] "Generic (PLEG): container finished" podID="f13ca2d5-16a8-4362-9062-86c12c40ed2f" containerID="16826b43378fd7e1d85b5d248fdf53605a429c9af4c3a990e00b806cef544dea" exitCode=0 Nov 25 10:14:53 crc kubenswrapper[4687]: I1125 10:14:53.523541 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g9bl7" event={"ID":"f13ca2d5-16a8-4362-9062-86c12c40ed2f","Type":"ContainerDied","Data":"16826b43378fd7e1d85b5d248fdf53605a429c9af4c3a990e00b806cef544dea"} Nov 25 10:14:53 crc kubenswrapper[4687]: I1125 10:14:53.529865 4687 generic.go:334] "Generic (PLEG): container finished" podID="5984207d-ea39-4d83-9af5-7299cba9a0c1" containerID="85243bebe430b890d79d1e68396ec0c8e781d3704a9097b8cb74372a4bd84ff6" exitCode=0 Nov 25 10:14:53 crc kubenswrapper[4687]: I1125 10:14:53.529903 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sst2l" event={"ID":"5984207d-ea39-4d83-9af5-7299cba9a0c1","Type":"ContainerDied","Data":"85243bebe430b890d79d1e68396ec0c8e781d3704a9097b8cb74372a4bd84ff6"} Nov 25 10:14:53 crc kubenswrapper[4687]: I1125 10:14:53.746390 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="4898fc6b-3556-4c62-93df-f94a4517940c" path="/var/lib/kubelet/pods/4898fc6b-3556-4c62-93df-f94a4517940c/volumes" Nov 25 10:14:53 crc kubenswrapper[4687]: I1125 10:14:53.844422 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:14:53 crc kubenswrapper[4687]: I1125 10:14:53.844769 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:14:55 crc kubenswrapper[4687]: I1125 10:14:55.549518 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g9bl7" event={"ID":"f13ca2d5-16a8-4362-9062-86c12c40ed2f","Type":"ContainerStarted","Data":"14df9f5e3c54aa7edd7056c6bca49756e905acfa2e5f1b7ea58de26c5178ab7c"} Nov 25 10:14:55 crc kubenswrapper[4687]: I1125 10:14:55.553327 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sst2l" event={"ID":"5984207d-ea39-4d83-9af5-7299cba9a0c1","Type":"ContainerStarted","Data":"b339dacf780ce3680a562908c22bfa02dbea6fa39d8ab2856b32b267d9cc470c"} Nov 25 10:14:55 crc kubenswrapper[4687]: I1125 10:14:55.578538 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-g9bl7" podStartSLOduration=6.869374084 podStartE2EDuration="10.578515852s" podCreationTimestamp="2025-11-25 10:14:45 +0000 UTC" firstStartedPulling="2025-11-25 10:14:50.492423204 +0000 UTC m=+4285.546062932" lastFinishedPulling="2025-11-25 10:14:54.201564982 +0000 UTC m=+4289.255204700" observedRunningTime="2025-11-25 10:14:55.575003106 +0000 UTC m=+4290.628642834" watchObservedRunningTime="2025-11-25 10:14:55.578515852 +0000 UTC m=+4290.632155570" Nov 25 10:14:55 crc kubenswrapper[4687]: I1125 10:14:55.598221 4687 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-sst2l" podStartSLOduration=7.805092153 podStartE2EDuration="11.598202855s" podCreationTimestamp="2025-11-25 10:14:44 +0000 UTC" firstStartedPulling="2025-11-25 10:14:50.4926723 +0000 UTC m=+4285.546312028" lastFinishedPulling="2025-11-25 10:14:54.285783012 +0000 UTC m=+4289.339422730" observedRunningTime="2025-11-25 10:14:55.596050456 +0000 UTC m=+4290.649690174" watchObservedRunningTime="2025-11-25 10:14:55.598202855 +0000 UTC m=+4290.651842573" Nov 25 10:14:58 crc kubenswrapper[4687]: I1125 10:14:58.597088 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-tc7r6_0ff14130-8c48-4847-8fa6-1ba61b371244/cert-manager-controller/0.log" Nov 25 10:14:58 crc kubenswrapper[4687]: I1125 10:14:58.662641 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-gk2n9_643e6584-e9ad-4fe0-96a6-d1dda245fe76/cert-manager-cainjector/0.log" Nov 25 10:14:58 crc kubenswrapper[4687]: I1125 10:14:58.748769 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-f22x8_4506551b-78bf-4fe5-8b60-e9e34a53c8df/cert-manager-webhook/0.log" Nov 25 10:15:00 crc 
kubenswrapper[4687]: I1125 10:15:00.194699 4687 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401095-52ts2"] Nov 25 10:15:00 crc kubenswrapper[4687]: E1125 10:15:00.195764 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4898fc6b-3556-4c62-93df-f94a4517940c" containerName="extract-utilities" Nov 25 10:15:00 crc kubenswrapper[4687]: I1125 10:15:00.195785 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="4898fc6b-3556-4c62-93df-f94a4517940c" containerName="extract-utilities" Nov 25 10:15:00 crc kubenswrapper[4687]: E1125 10:15:00.195803 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4898fc6b-3556-4c62-93df-f94a4517940c" containerName="registry-server" Nov 25 10:15:00 crc kubenswrapper[4687]: I1125 10:15:00.195812 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="4898fc6b-3556-4c62-93df-f94a4517940c" containerName="registry-server" Nov 25 10:15:00 crc kubenswrapper[4687]: E1125 10:15:00.195856 4687 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4898fc6b-3556-4c62-93df-f94a4517940c" containerName="extract-content" Nov 25 10:15:00 crc kubenswrapper[4687]: I1125 10:15:00.195866 4687 state_mem.go:107] "Deleted CPUSet assignment" podUID="4898fc6b-3556-4c62-93df-f94a4517940c" containerName="extract-content" Nov 25 10:15:00 crc kubenswrapper[4687]: I1125 10:15:00.196136 4687 memory_manager.go:354] "RemoveStaleState removing state" podUID="4898fc6b-3556-4c62-93df-f94a4517940c" containerName="registry-server" Nov 25 10:15:00 crc kubenswrapper[4687]: I1125 10:15:00.197069 4687 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-52ts2" Nov 25 10:15:00 crc kubenswrapper[4687]: I1125 10:15:00.199675 4687 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 10:15:00 crc kubenswrapper[4687]: I1125 10:15:00.200954 4687 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 10:15:00 crc kubenswrapper[4687]: I1125 10:15:00.208115 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401095-52ts2"] Nov 25 10:15:00 crc kubenswrapper[4687]: I1125 10:15:00.260413 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nk7tl\" (UniqueName: \"kubernetes.io/projected/71fff6cd-076c-447e-9efa-4a3219b8b0d2-kube-api-access-nk7tl\") pod \"collect-profiles-29401095-52ts2\" (UID: \"71fff6cd-076c-447e-9efa-4a3219b8b0d2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-52ts2" Nov 25 10:15:00 crc kubenswrapper[4687]: I1125 10:15:00.260622 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/71fff6cd-076c-447e-9efa-4a3219b8b0d2-secret-volume\") pod \"collect-profiles-29401095-52ts2\" (UID: \"71fff6cd-076c-447e-9efa-4a3219b8b0d2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-52ts2" Nov 25 10:15:00 crc kubenswrapper[4687]: I1125 10:15:00.260764 4687 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/71fff6cd-076c-447e-9efa-4a3219b8b0d2-config-volume\") pod 
\"collect-profiles-29401095-52ts2\" (UID: \"71fff6cd-076c-447e-9efa-4a3219b8b0d2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-52ts2" Nov 25 10:15:00 crc kubenswrapper[4687]: I1125 10:15:00.364474 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nk7tl\" (UniqueName: \"kubernetes.io/projected/71fff6cd-076c-447e-9efa-4a3219b8b0d2-kube-api-access-nk7tl\") pod \"collect-profiles-29401095-52ts2\" (UID: \"71fff6cd-076c-447e-9efa-4a3219b8b0d2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-52ts2" Nov 25 10:15:00 crc kubenswrapper[4687]: I1125 10:15:00.364628 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/71fff6cd-076c-447e-9efa-4a3219b8b0d2-secret-volume\") pod \"collect-profiles-29401095-52ts2\" (UID: \"71fff6cd-076c-447e-9efa-4a3219b8b0d2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-52ts2" Nov 25 10:15:00 crc kubenswrapper[4687]: I1125 10:15:00.364666 4687 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/71fff6cd-076c-447e-9efa-4a3219b8b0d2-config-volume\") pod \"collect-profiles-29401095-52ts2\" (UID: \"71fff6cd-076c-447e-9efa-4a3219b8b0d2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-52ts2" Nov 25 10:15:00 crc kubenswrapper[4687]: I1125 10:15:00.365997 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/71fff6cd-076c-447e-9efa-4a3219b8b0d2-config-volume\") pod \"collect-profiles-29401095-52ts2\" (UID: \"71fff6cd-076c-447e-9efa-4a3219b8b0d2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-52ts2" Nov 25 10:15:00 crc kubenswrapper[4687]: I1125 10:15:00.386139 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/71fff6cd-076c-447e-9efa-4a3219b8b0d2-secret-volume\") pod \"collect-profiles-29401095-52ts2\" (UID: \"71fff6cd-076c-447e-9efa-4a3219b8b0d2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-52ts2" Nov 25 10:15:00 crc kubenswrapper[4687]: I1125 10:15:00.389405 4687 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nk7tl\" (UniqueName: \"kubernetes.io/projected/71fff6cd-076c-447e-9efa-4a3219b8b0d2-kube-api-access-nk7tl\") pod \"collect-profiles-29401095-52ts2\" (UID: \"71fff6cd-076c-447e-9efa-4a3219b8b0d2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-52ts2" Nov 25 10:15:00 crc kubenswrapper[4687]: I1125 10:15:00.525049 4687 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-52ts2" Nov 25 10:15:01 crc kubenswrapper[4687]: I1125 10:15:01.007660 4687 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401095-52ts2"] Nov 25 10:15:01 crc kubenswrapper[4687]: I1125 10:15:01.619084 4687 generic.go:334] "Generic (PLEG): container finished" podID="71fff6cd-076c-447e-9efa-4a3219b8b0d2" containerID="fa9d4b3aa84538764f672b9d6280494bc755bb516c4ec99795ff3cd1696bbc7e" exitCode=0 Nov 25 10:15:01 crc kubenswrapper[4687]: I1125 10:15:01.619382 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-52ts2" event={"ID":"71fff6cd-076c-447e-9efa-4a3219b8b0d2","Type":"ContainerDied","Data":"fa9d4b3aa84538764f672b9d6280494bc755bb516c4ec99795ff3cd1696bbc7e"} Nov 25 10:15:01 crc kubenswrapper[4687]: I1125 10:15:01.619413 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-52ts2" event={"ID":"71fff6cd-076c-447e-9efa-4a3219b8b0d2","Type":"ContainerStarted","Data":"38d5fb41f5ab1c9acd2da36fa8dee7b2152b6744a5621b7693138a7daa4363d8"} Nov 25 10:15:03 crc kubenswrapper[4687]: I1125 10:15:03.133574 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-52ts2" Nov 25 10:15:03 crc kubenswrapper[4687]: I1125 10:15:03.231850 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nk7tl\" (UniqueName: \"kubernetes.io/projected/71fff6cd-076c-447e-9efa-4a3219b8b0d2-kube-api-access-nk7tl\") pod \"71fff6cd-076c-447e-9efa-4a3219b8b0d2\" (UID: \"71fff6cd-076c-447e-9efa-4a3219b8b0d2\") " Nov 25 10:15:03 crc kubenswrapper[4687]: I1125 10:15:03.232063 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/71fff6cd-076c-447e-9efa-4a3219b8b0d2-config-volume\") pod \"71fff6cd-076c-447e-9efa-4a3219b8b0d2\" (UID: \"71fff6cd-076c-447e-9efa-4a3219b8b0d2\") " Nov 25 10:15:03 crc kubenswrapper[4687]: I1125 10:15:03.232121 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/71fff6cd-076c-447e-9efa-4a3219b8b0d2-secret-volume\") pod \"71fff6cd-076c-447e-9efa-4a3219b8b0d2\" (UID: \"71fff6cd-076c-447e-9efa-4a3219b8b0d2\") " Nov 25 10:15:03 crc kubenswrapper[4687]: I1125 10:15:03.233320 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/71fff6cd-076c-447e-9efa-4a3219b8b0d2-config-volume" (OuterVolumeSpecName: "config-volume") pod "71fff6cd-076c-447e-9efa-4a3219b8b0d2" (UID: "71fff6cd-076c-447e-9efa-4a3219b8b0d2"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:15:03 crc kubenswrapper[4687]: I1125 10:15:03.239696 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71fff6cd-076c-447e-9efa-4a3219b8b0d2-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "71fff6cd-076c-447e-9efa-4a3219b8b0d2" (UID: "71fff6cd-076c-447e-9efa-4a3219b8b0d2"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:15:03 crc kubenswrapper[4687]: I1125 10:15:03.243606 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71fff6cd-076c-447e-9efa-4a3219b8b0d2-kube-api-access-nk7tl" (OuterVolumeSpecName: "kube-api-access-nk7tl") pod "71fff6cd-076c-447e-9efa-4a3219b8b0d2" (UID: "71fff6cd-076c-447e-9efa-4a3219b8b0d2"). InnerVolumeSpecName "kube-api-access-nk7tl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:15:03 crc kubenswrapper[4687]: I1125 10:15:03.334776 4687 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/71fff6cd-076c-447e-9efa-4a3219b8b0d2-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:15:03 crc kubenswrapper[4687]: I1125 10:15:03.334813 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nk7tl\" (UniqueName: \"kubernetes.io/projected/71fff6cd-076c-447e-9efa-4a3219b8b0d2-kube-api-access-nk7tl\") on node \"crc\" DevicePath \"\"" Nov 25 10:15:03 crc kubenswrapper[4687]: I1125 10:15:03.334822 4687 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/71fff6cd-076c-447e-9efa-4a3219b8b0d2-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:15:03 crc kubenswrapper[4687]: I1125 10:15:03.640365 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-52ts2" event={"ID":"71fff6cd-076c-447e-9efa-4a3219b8b0d2","Type":"ContainerDied","Data":"38d5fb41f5ab1c9acd2da36fa8dee7b2152b6744a5621b7693138a7daa4363d8"} Nov 25 10:15:03 crc kubenswrapper[4687]: I1125 10:15:03.640412 4687 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="38d5fb41f5ab1c9acd2da36fa8dee7b2152b6744a5621b7693138a7daa4363d8" Nov 25 10:15:03 crc kubenswrapper[4687]: I1125 10:15:03.640417 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-52ts2" Nov 25 10:15:04 crc kubenswrapper[4687]: I1125 10:15:04.200758 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401050-crkvm"] Nov 25 10:15:04 crc kubenswrapper[4687]: I1125 10:15:04.212691 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401050-crkvm"] Nov 25 10:15:04 crc kubenswrapper[4687]: I1125 10:15:04.776086 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-sst2l" Nov 25 10:15:04 crc kubenswrapper[4687]: I1125 10:15:04.776440 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-sst2l" Nov 25 10:15:04 crc kubenswrapper[4687]: I1125 10:15:04.823381 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-sst2l" Nov 25 10:15:05 crc kubenswrapper[4687]: I1125 10:15:05.368832 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-g9bl7" Nov 25 10:15:05 crc kubenswrapper[4687]: I1125 10:15:05.369143 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-g9bl7" Nov 25 10:15:05 crc kubenswrapper[4687]: I1125 10:15:05.454891 4687 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-g9bl7" Nov 25 10:15:05 crc kubenswrapper[4687]: I1125 10:15:05.746113 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e34247b-6e99-4afd-988f-a22d68fd3858" path="/var/lib/kubelet/pods/6e34247b-6e99-4afd-988f-a22d68fd3858/volumes" Nov 25 10:15:06 crc kubenswrapper[4687]: I1125 10:15:06.148268 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-g9bl7" Nov 25 10:15:06 crc kubenswrapper[4687]: I1125 10:15:06.149256 4687 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-sst2l" Nov 25 10:15:06 crc kubenswrapper[4687]: I1125 10:15:06.455347 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-g9bl7"] Nov 25 10:15:07 crc kubenswrapper[4687]: I1125 10:15:07.683937 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-g9bl7" podUID="f13ca2d5-16a8-4362-9062-86c12c40ed2f" containerName="registry-server" containerID="cri-o://14df9f5e3c54aa7edd7056c6bca49756e905acfa2e5f1b7ea58de26c5178ab7c" gracePeriod=2 Nov 25 10:15:07 crc kubenswrapper[4687]: I1125 10:15:07.861425 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sst2l"] Nov 25 10:15:07 crc kubenswrapper[4687]: I1125 10:15:07.862027 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-sst2l" podUID="5984207d-ea39-4d83-9af5-7299cba9a0c1" containerName="registry-server" containerID="cri-o://b339dacf780ce3680a562908c22bfa02dbea6fa39d8ab2856b32b267d9cc470c" gracePeriod=2 Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.148045 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-g9bl7" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.230017 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f13ca2d5-16a8-4362-9062-86c12c40ed2f-catalog-content\") pod \"f13ca2d5-16a8-4362-9062-86c12c40ed2f\" (UID: \"f13ca2d5-16a8-4362-9062-86c12c40ed2f\") " Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.230397 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45r98\" (UniqueName: \"kubernetes.io/projected/f13ca2d5-16a8-4362-9062-86c12c40ed2f-kube-api-access-45r98\") pod \"f13ca2d5-16a8-4362-9062-86c12c40ed2f\" (UID: \"f13ca2d5-16a8-4362-9062-86c12c40ed2f\") " Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.230521 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f13ca2d5-16a8-4362-9062-86c12c40ed2f-utilities\") pod \"f13ca2d5-16a8-4362-9062-86c12c40ed2f\" (UID: \"f13ca2d5-16a8-4362-9062-86c12c40ed2f\") " Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.231190 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f13ca2d5-16a8-4362-9062-86c12c40ed2f-utilities" (OuterVolumeSpecName: "utilities") pod "f13ca2d5-16a8-4362-9062-86c12c40ed2f" (UID: "f13ca2d5-16a8-4362-9062-86c12c40ed2f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.236900 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f13ca2d5-16a8-4362-9062-86c12c40ed2f-kube-api-access-45r98" (OuterVolumeSpecName: "kube-api-access-45r98") pod "f13ca2d5-16a8-4362-9062-86c12c40ed2f" (UID: "f13ca2d5-16a8-4362-9062-86c12c40ed2f"). InnerVolumeSpecName "kube-api-access-45r98". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.290800 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f13ca2d5-16a8-4362-9062-86c12c40ed2f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f13ca2d5-16a8-4362-9062-86c12c40ed2f" (UID: "f13ca2d5-16a8-4362-9062-86c12c40ed2f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.290966 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sst2l" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.333103 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f13ca2d5-16a8-4362-9062-86c12c40ed2f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.333139 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f13ca2d5-16a8-4362-9062-86c12c40ed2f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.333152 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45r98\" (UniqueName: \"kubernetes.io/projected/f13ca2d5-16a8-4362-9062-86c12c40ed2f-kube-api-access-45r98\") on node \"crc\" DevicePath \"\"" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.435136 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5984207d-ea39-4d83-9af5-7299cba9a0c1-utilities\") pod \"5984207d-ea39-4d83-9af5-7299cba9a0c1\" (UID: \"5984207d-ea39-4d83-9af5-7299cba9a0c1\") " Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.435762 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5984207d-ea39-4d83-9af5-7299cba9a0c1-utilities" (OuterVolumeSpecName: "utilities") pod "5984207d-ea39-4d83-9af5-7299cba9a0c1" (UID: "5984207d-ea39-4d83-9af5-7299cba9a0c1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.435770 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dzfzq\" (UniqueName: \"kubernetes.io/projected/5984207d-ea39-4d83-9af5-7299cba9a0c1-kube-api-access-dzfzq\") pod \"5984207d-ea39-4d83-9af5-7299cba9a0c1\" (UID: \"5984207d-ea39-4d83-9af5-7299cba9a0c1\") " Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.435892 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5984207d-ea39-4d83-9af5-7299cba9a0c1-catalog-content\") pod \"5984207d-ea39-4d83-9af5-7299cba9a0c1\" (UID: \"5984207d-ea39-4d83-9af5-7299cba9a0c1\") " Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.437395 4687 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5984207d-ea39-4d83-9af5-7299cba9a0c1-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.439804 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5984207d-ea39-4d83-9af5-7299cba9a0c1-kube-api-access-dzfzq" (OuterVolumeSpecName: "kube-api-access-dzfzq") pod "5984207d-ea39-4d83-9af5-7299cba9a0c1" (UID: "5984207d-ea39-4d83-9af5-7299cba9a0c1"). InnerVolumeSpecName "kube-api-access-dzfzq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.493848 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5984207d-ea39-4d83-9af5-7299cba9a0c1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5984207d-ea39-4d83-9af5-7299cba9a0c1" (UID: "5984207d-ea39-4d83-9af5-7299cba9a0c1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.539099 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dzfzq\" (UniqueName: \"kubernetes.io/projected/5984207d-ea39-4d83-9af5-7299cba9a0c1-kube-api-access-dzfzq\") on node \"crc\" DevicePath \"\"" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.539159 4687 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5984207d-ea39-4d83-9af5-7299cba9a0c1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.695744 4687 generic.go:334] "Generic (PLEG): container finished" podID="5984207d-ea39-4d83-9af5-7299cba9a0c1" containerID="b339dacf780ce3680a562908c22bfa02dbea6fa39d8ab2856b32b267d9cc470c" exitCode=0 Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.695838 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sst2l" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.695851 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sst2l" event={"ID":"5984207d-ea39-4d83-9af5-7299cba9a0c1","Type":"ContainerDied","Data":"b339dacf780ce3680a562908c22bfa02dbea6fa39d8ab2856b32b267d9cc470c"} Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.696228 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sst2l" event={"ID":"5984207d-ea39-4d83-9af5-7299cba9a0c1","Type":"ContainerDied","Data":"2c492fe7f0ce8f29c446e6f35091715f40e8c6200678226aeb4d03f7091240a3"} Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.696250 4687 scope.go:117] "RemoveContainer" containerID="b339dacf780ce3680a562908c22bfa02dbea6fa39d8ab2856b32b267d9cc470c" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.698341 4687 generic.go:334] "Generic (PLEG): container finished" podID="f13ca2d5-16a8-4362-9062-86c12c40ed2f" containerID="14df9f5e3c54aa7edd7056c6bca49756e905acfa2e5f1b7ea58de26c5178ab7c" exitCode=0 Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.698381 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g9bl7" event={"ID":"f13ca2d5-16a8-4362-9062-86c12c40ed2f","Type":"ContainerDied","Data":"14df9f5e3c54aa7edd7056c6bca49756e905acfa2e5f1b7ea58de26c5178ab7c"} Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.698406 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g9bl7" event={"ID":"f13ca2d5-16a8-4362-9062-86c12c40ed2f","Type":"ContainerDied","Data":"5810c05bf62efe970c4453ff1d8a324111a2158892e5188609f3a45f05a6ecff"} Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.698469 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-g9bl7" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.716412 4687 scope.go:117] "RemoveContainer" containerID="85243bebe430b890d79d1e68396ec0c8e781d3704a9097b8cb74372a4bd84ff6" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.738185 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-g9bl7"] Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.749520 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-g9bl7"] Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.756937 4687 scope.go:117] "RemoveContainer" containerID="dae0ec58db6ab368a3b6556c740d71c14c45dd647f68bd07f7c1800bc4f6541c" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.758015 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sst2l"] Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.767575 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-sst2l"] Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.777863 4687 scope.go:117] "RemoveContainer" containerID="b339dacf780ce3680a562908c22bfa02dbea6fa39d8ab2856b32b267d9cc470c" Nov 25 10:15:08 crc kubenswrapper[4687]: E1125 10:15:08.778259 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b339dacf780ce3680a562908c22bfa02dbea6fa39d8ab2856b32b267d9cc470c\": container with ID starting with b339dacf780ce3680a562908c22bfa02dbea6fa39d8ab2856b32b267d9cc470c not found: ID does not exist" containerID="b339dacf780ce3680a562908c22bfa02dbea6fa39d8ab2856b32b267d9cc470c" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.778304 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b339dacf780ce3680a562908c22bfa02dbea6fa39d8ab2856b32b267d9cc470c"} err="failed to get container status \"b339dacf780ce3680a562908c22bfa02dbea6fa39d8ab2856b32b267d9cc470c\": rpc error: code = NotFound desc = could not find container \"b339dacf780ce3680a562908c22bfa02dbea6fa39d8ab2856b32b267d9cc470c\": container with ID starting with b339dacf780ce3680a562908c22bfa02dbea6fa39d8ab2856b32b267d9cc470c not found: ID does not exist" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.778325 4687 scope.go:117] "RemoveContainer" containerID="85243bebe430b890d79d1e68396ec0c8e781d3704a9097b8cb74372a4bd84ff6" Nov 25 10:15:08 crc kubenswrapper[4687]: E1125 10:15:08.778742 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85243bebe430b890d79d1e68396ec0c8e781d3704a9097b8cb74372a4bd84ff6\": container with ID starting with 85243bebe430b890d79d1e68396ec0c8e781d3704a9097b8cb74372a4bd84ff6 not found: ID does not exist" containerID="85243bebe430b890d79d1e68396ec0c8e781d3704a9097b8cb74372a4bd84ff6" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.778799 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85243bebe430b890d79d1e68396ec0c8e781d3704a9097b8cb74372a4bd84ff6"} err="failed to get container status \"85243bebe430b890d79d1e68396ec0c8e781d3704a9097b8cb74372a4bd84ff6\": rpc error: code = NotFound desc = could not find container \"85243bebe430b890d79d1e68396ec0c8e781d3704a9097b8cb74372a4bd84ff6\": container with ID starting with 
85243bebe430b890d79d1e68396ec0c8e781d3704a9097b8cb74372a4bd84ff6 not found: ID does not exist" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.778827 4687 scope.go:117] "RemoveContainer" containerID="dae0ec58db6ab368a3b6556c740d71c14c45dd647f68bd07f7c1800bc4f6541c" Nov 25 10:15:08 crc kubenswrapper[4687]: E1125 10:15:08.779072 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dae0ec58db6ab368a3b6556c740d71c14c45dd647f68bd07f7c1800bc4f6541c\": container with ID starting with dae0ec58db6ab368a3b6556c740d71c14c45dd647f68bd07f7c1800bc4f6541c not found: ID does not exist" containerID="dae0ec58db6ab368a3b6556c740d71c14c45dd647f68bd07f7c1800bc4f6541c" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.779105 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dae0ec58db6ab368a3b6556c740d71c14c45dd647f68bd07f7c1800bc4f6541c"} err="failed to get container status \"dae0ec58db6ab368a3b6556c740d71c14c45dd647f68bd07f7c1800bc4f6541c\": rpc error: code = NotFound desc = could not find container \"dae0ec58db6ab368a3b6556c740d71c14c45dd647f68bd07f7c1800bc4f6541c\": container with ID starting with dae0ec58db6ab368a3b6556c740d71c14c45dd647f68bd07f7c1800bc4f6541c not found: ID does not exist" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.779124 4687 scope.go:117] "RemoveContainer" containerID="14df9f5e3c54aa7edd7056c6bca49756e905acfa2e5f1b7ea58de26c5178ab7c" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.800072 4687 scope.go:117] "RemoveContainer" containerID="16826b43378fd7e1d85b5d248fdf53605a429c9af4c3a990e00b806cef544dea" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.858206 4687 scope.go:117] "RemoveContainer" containerID="a0c466d911326f80a6f0ae5f38a8ac34d58939573e69a2335b92ad60ca2fd902" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.903143 4687 scope.go:117] "RemoveContainer" containerID="14df9f5e3c54aa7edd7056c6bca49756e905acfa2e5f1b7ea58de26c5178ab7c" Nov 25 10:15:08 crc kubenswrapper[4687]: E1125 10:15:08.904184 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14df9f5e3c54aa7edd7056c6bca49756e905acfa2e5f1b7ea58de26c5178ab7c\": container with ID starting with 14df9f5e3c54aa7edd7056c6bca49756e905acfa2e5f1b7ea58de26c5178ab7c not found: ID does not exist" containerID="14df9f5e3c54aa7edd7056c6bca49756e905acfa2e5f1b7ea58de26c5178ab7c" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.904222 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14df9f5e3c54aa7edd7056c6bca49756e905acfa2e5f1b7ea58de26c5178ab7c"} err="failed to get container status \"14df9f5e3c54aa7edd7056c6bca49756e905acfa2e5f1b7ea58de26c5178ab7c\": rpc error: code = NotFound desc = could not find container \"14df9f5e3c54aa7edd7056c6bca49756e905acfa2e5f1b7ea58de26c5178ab7c\": container with ID starting with 14df9f5e3c54aa7edd7056c6bca49756e905acfa2e5f1b7ea58de26c5178ab7c not found: ID does not exist" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.904250 4687 scope.go:117] "RemoveContainer" containerID="16826b43378fd7e1d85b5d248fdf53605a429c9af4c3a990e00b806cef544dea" Nov 25 10:15:08 crc kubenswrapper[4687]: E1125 10:15:08.904618 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16826b43378fd7e1d85b5d248fdf53605a429c9af4c3a990e00b806cef544dea\": container 
with ID starting with 16826b43378fd7e1d85b5d248fdf53605a429c9af4c3a990e00b806cef544dea not found: ID does not exist" containerID="16826b43378fd7e1d85b5d248fdf53605a429c9af4c3a990e00b806cef544dea" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.904652 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16826b43378fd7e1d85b5d248fdf53605a429c9af4c3a990e00b806cef544dea"} err="failed to get container status \"16826b43378fd7e1d85b5d248fdf53605a429c9af4c3a990e00b806cef544dea\": rpc error: code = NotFound desc = could not find container \"16826b43378fd7e1d85b5d248fdf53605a429c9af4c3a990e00b806cef544dea\": container with ID starting with 16826b43378fd7e1d85b5d248fdf53605a429c9af4c3a990e00b806cef544dea not found: ID does not exist" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.904674 4687 scope.go:117] "RemoveContainer" containerID="a0c466d911326f80a6f0ae5f38a8ac34d58939573e69a2335b92ad60ca2fd902" Nov 25 10:15:08 crc kubenswrapper[4687]: E1125 10:15:08.904902 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a0c466d911326f80a6f0ae5f38a8ac34d58939573e69a2335b92ad60ca2fd902\": container with ID starting with a0c466d911326f80a6f0ae5f38a8ac34d58939573e69a2335b92ad60ca2fd902 not found: ID does not exist" containerID="a0c466d911326f80a6f0ae5f38a8ac34d58939573e69a2335b92ad60ca2fd902" Nov 25 10:15:08 crc kubenswrapper[4687]: I1125 10:15:08.904923 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0c466d911326f80a6f0ae5f38a8ac34d58939573e69a2335b92ad60ca2fd902"} err="failed to get container status \"a0c466d911326f80a6f0ae5f38a8ac34d58939573e69a2335b92ad60ca2fd902\": rpc error: code = NotFound desc = could not find container \"a0c466d911326f80a6f0ae5f38a8ac34d58939573e69a2335b92ad60ca2fd902\": container with ID starting with a0c466d911326f80a6f0ae5f38a8ac34d58939573e69a2335b92ad60ca2fd902 not found: ID does not exist" Nov 25 10:15:09 crc kubenswrapper[4687]: I1125 10:15:09.745140 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5984207d-ea39-4d83-9af5-7299cba9a0c1" path="/var/lib/kubelet/pods/5984207d-ea39-4d83-9af5-7299cba9a0c1/volumes" Nov 25 10:15:09 crc kubenswrapper[4687]: I1125 10:15:09.746182 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f13ca2d5-16a8-4362-9062-86c12c40ed2f" path="/var/lib/kubelet/pods/f13ca2d5-16a8-4362-9062-86c12c40ed2f/volumes" Nov 25 10:15:10 crc kubenswrapper[4687]: I1125 10:15:10.699939 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-7zd5p_5197825b-263a-49cc-abde-f5863cac4989/nmstate-console-plugin/0.log" Nov 25 10:15:10 crc kubenswrapper[4687]: I1125 10:15:10.815076 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-dj956_39575ce1-8fae-41c6-8603-a4d49c101e7d/nmstate-handler/0.log" Nov 25 10:15:10 crc kubenswrapper[4687]: I1125 10:15:10.902583 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-9n22k_5e374138-9c9c-41b4-a2d1-eab48197d4bb/nmstate-metrics/0.log" Nov 25 10:15:10 crc kubenswrapper[4687]: I1125 10:15:10.919230 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-9n22k_5e374138-9c9c-41b4-a2d1-eab48197d4bb/kube-rbac-proxy/0.log" Nov 25 10:15:11 crc kubenswrapper[4687]: I1125 10:15:11.068691 4687 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-vgd6d_4e8ba187-a91b-4312-bf06-eb0c5f0e5bd9/nmstate-operator/0.log" Nov 25 10:15:11 crc kubenswrapper[4687]: I1125 10:15:11.146879 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-p89k9_e6f4fd7b-09a3-44f9-9a7d-aaf6f7625989/nmstate-webhook/0.log" Nov 25 10:15:23 crc kubenswrapper[4687]: I1125 10:15:23.844686 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:15:23 crc kubenswrapper[4687]: I1125 10:15:23.845331 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:15:25 crc kubenswrapper[4687]: I1125 10:15:25.149324 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-6jtwr_eb69d750-834f-4728-8a20-f37dc1195e86/kube-rbac-proxy/0.log" Nov 25 10:15:25 crc kubenswrapper[4687]: I1125 10:15:25.312792 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-6jtwr_eb69d750-834f-4728-8a20-f37dc1195e86/controller/0.log" Nov 25 10:15:25 crc kubenswrapper[4687]: I1125 10:15:25.427953 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-frr-files/0.log" Nov 25 10:15:25 crc kubenswrapper[4687]: I1125 10:15:25.578719 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-frr-files/0.log" Nov 25 10:15:25 crc kubenswrapper[4687]: I1125 10:15:25.595862 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-reloader/0.log" Nov 25 10:15:25 crc kubenswrapper[4687]: I1125 10:15:25.620522 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-metrics/0.log" Nov 25 10:15:25 crc kubenswrapper[4687]: I1125 10:15:25.620861 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-reloader/0.log" Nov 25 10:15:25 crc kubenswrapper[4687]: I1125 10:15:25.799872 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-reloader/0.log" Nov 25 10:15:25 crc kubenswrapper[4687]: I1125 10:15:25.838840 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-metrics/0.log" Nov 25 10:15:25 crc kubenswrapper[4687]: I1125 10:15:25.839877 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-frr-files/0.log" Nov 25 10:15:25 crc kubenswrapper[4687]: I1125 10:15:25.843852 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-metrics/0.log" Nov 25 10:15:26 crc 
kubenswrapper[4687]: I1125 10:15:26.660782 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-frr-files/0.log" Nov 25 10:15:26 crc kubenswrapper[4687]: I1125 10:15:26.677173 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-metrics/0.log" Nov 25 10:15:26 crc kubenswrapper[4687]: I1125 10:15:26.686995 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/cp-reloader/0.log" Nov 25 10:15:26 crc kubenswrapper[4687]: I1125 10:15:26.689522 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/controller/0.log" Nov 25 10:15:26 crc kubenswrapper[4687]: I1125 10:15:26.846929 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/frr-metrics/0.log" Nov 25 10:15:26 crc kubenswrapper[4687]: I1125 10:15:26.900294 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/kube-rbac-proxy-frr/0.log" Nov 25 10:15:26 crc kubenswrapper[4687]: I1125 10:15:26.912354 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/kube-rbac-proxy/0.log" Nov 25 10:15:27 crc kubenswrapper[4687]: I1125 10:15:27.096697 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/reloader/0.log" Nov 25 10:15:27 crc kubenswrapper[4687]: I1125 10:15:27.148787 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-ddrgp_b9abb225-82ca-44ea-a30c-ec214deb3316/frr-k8s-webhook-server/0.log" Nov 25 10:15:27 crc kubenswrapper[4687]: I1125 10:15:27.382070 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-558db5dd86-fc58n_66122fe0-e231-48e6-8051-a04d330d8f17/manager/0.log" Nov 25 10:15:27 crc kubenswrapper[4687]: I1125 10:15:27.609782 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-84db77dcc8-bjgl5_8ed9d933-29d2-4e13-bb9b-377cdc8cf10a/webhook-server/0.log" Nov 25 10:15:27 crc kubenswrapper[4687]: I1125 10:15:27.713882 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-q7pt2_a2ef7e49-e737-462f-8ff8-b045611d5baf/kube-rbac-proxy/0.log" Nov 25 10:15:28 crc kubenswrapper[4687]: I1125 10:15:28.152742 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-cf84d_633f6bd8-eb2e-485d-8ef6-67800b34f877/frr/0.log" Nov 25 10:15:28 crc kubenswrapper[4687]: I1125 10:15:28.670925 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-q7pt2_a2ef7e49-e737-462f-8ff8-b045611d5baf/speaker/0.log" Nov 25 10:15:40 crc kubenswrapper[4687]: I1125 10:15:40.273711 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7_94d372f4-1e99-47c6-89f3-d56aaf08cde8/util/0.log" Nov 25 10:15:40 crc kubenswrapper[4687]: I1125 10:15:40.433379 4687 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7_94d372f4-1e99-47c6-89f3-d56aaf08cde8/pull/0.log" Nov 25 10:15:40 crc kubenswrapper[4687]: I1125 10:15:40.469180 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7_94d372f4-1e99-47c6-89f3-d56aaf08cde8/pull/0.log" Nov 25 10:15:40 crc kubenswrapper[4687]: I1125 10:15:40.511563 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7_94d372f4-1e99-47c6-89f3-d56aaf08cde8/util/0.log" Nov 25 10:15:40 crc kubenswrapper[4687]: I1125 10:15:40.643932 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7_94d372f4-1e99-47c6-89f3-d56aaf08cde8/util/0.log" Nov 25 10:15:40 crc kubenswrapper[4687]: I1125 10:15:40.644092 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7_94d372f4-1e99-47c6-89f3-d56aaf08cde8/extract/0.log" Nov 25 10:15:40 crc kubenswrapper[4687]: I1125 10:15:40.685514 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772entfc7_94d372f4-1e99-47c6-89f3-d56aaf08cde8/pull/0.log" Nov 25 10:15:40 crc kubenswrapper[4687]: I1125 10:15:40.824273 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qgl5s_c463bf2b-be16-4478-b58b-50c681a58749/extract-utilities/0.log" Nov 25 10:15:40 crc kubenswrapper[4687]: I1125 10:15:40.977522 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qgl5s_c463bf2b-be16-4478-b58b-50c681a58749/extract-utilities/0.log" Nov 25 10:15:40 crc kubenswrapper[4687]: I1125 10:15:40.990458 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qgl5s_c463bf2b-be16-4478-b58b-50c681a58749/extract-content/0.log" Nov 25 10:15:41 crc kubenswrapper[4687]: I1125 10:15:41.002797 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qgl5s_c463bf2b-be16-4478-b58b-50c681a58749/extract-content/0.log" Nov 25 10:15:41 crc kubenswrapper[4687]: I1125 10:15:41.170847 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qgl5s_c463bf2b-be16-4478-b58b-50c681a58749/extract-content/0.log" Nov 25 10:15:41 crc kubenswrapper[4687]: I1125 10:15:41.185058 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qgl5s_c463bf2b-be16-4478-b58b-50c681a58749/extract-utilities/0.log" Nov 25 10:15:41 crc kubenswrapper[4687]: I1125 10:15:41.418825 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-t6b8w_c4f80d13-afbf-4fd7-8af8-726aef33138a/extract-utilities/0.log" Nov 25 10:15:41 crc kubenswrapper[4687]: I1125 10:15:41.662552 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-t6b8w_c4f80d13-afbf-4fd7-8af8-726aef33138a/extract-content/0.log" Nov 25 10:15:41 crc kubenswrapper[4687]: I1125 10:15:41.674242 4687 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-t6b8w_c4f80d13-afbf-4fd7-8af8-726aef33138a/extract-utilities/0.log" Nov 25 10:15:41 crc kubenswrapper[4687]: I1125 10:15:41.685123 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-t6b8w_c4f80d13-afbf-4fd7-8af8-726aef33138a/extract-content/0.log" Nov 25 10:15:41 crc kubenswrapper[4687]: I1125 10:15:41.685433 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qgl5s_c463bf2b-be16-4478-b58b-50c681a58749/registry-server/0.log" Nov 25 10:15:41 crc kubenswrapper[4687]: I1125 10:15:41.856677 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-t6b8w_c4f80d13-afbf-4fd7-8af8-726aef33138a/extract-utilities/0.log" Nov 25 10:15:41 crc kubenswrapper[4687]: I1125 10:15:41.940774 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-t6b8w_c4f80d13-afbf-4fd7-8af8-726aef33138a/extract-content/0.log" Nov 25 10:15:42 crc kubenswrapper[4687]: I1125 10:15:42.094256 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl_3775e1e0-7599-47fa-b6af-872dff20eb0a/util/0.log" Nov 25 10:15:42 crc kubenswrapper[4687]: I1125 10:15:42.331712 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl_3775e1e0-7599-47fa-b6af-872dff20eb0a/util/0.log" Nov 25 10:15:42 crc kubenswrapper[4687]: I1125 10:15:42.376476 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl_3775e1e0-7599-47fa-b6af-872dff20eb0a/pull/0.log" Nov 25 10:15:42 crc kubenswrapper[4687]: I1125 10:15:42.401990 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl_3775e1e0-7599-47fa-b6af-872dff20eb0a/pull/0.log" Nov 25 10:15:42 crc kubenswrapper[4687]: I1125 10:15:42.540790 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl_3775e1e0-7599-47fa-b6af-872dff20eb0a/pull/0.log" Nov 25 10:15:42 crc kubenswrapper[4687]: I1125 10:15:42.578814 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl_3775e1e0-7599-47fa-b6af-872dff20eb0a/util/0.log" Nov 25 10:15:42 crc kubenswrapper[4687]: I1125 10:15:42.598619 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6ktfjl_3775e1e0-7599-47fa-b6af-872dff20eb0a/extract/0.log" Nov 25 10:15:42 crc kubenswrapper[4687]: I1125 10:15:42.744138 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-t6b8w_c4f80d13-afbf-4fd7-8af8-726aef33138a/registry-server/0.log" Nov 25 10:15:42 crc kubenswrapper[4687]: I1125 10:15:42.769859 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-sdgfq_a26f1e8c-3181-4dbf-b2b6-13772b1d66d6/marketplace-operator/0.log" Nov 25 10:15:42 crc kubenswrapper[4687]: I1125 10:15:42.944144 4687 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-rf9fh_92741ed4-1138-4ac2-ad4b-0c558b1b574d/extract-utilities/0.log" Nov 25 10:15:43 crc kubenswrapper[4687]: I1125 10:15:43.137734 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rf9fh_92741ed4-1138-4ac2-ad4b-0c558b1b574d/extract-content/0.log" Nov 25 10:15:43 crc kubenswrapper[4687]: I1125 10:15:43.157414 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rf9fh_92741ed4-1138-4ac2-ad4b-0c558b1b574d/extract-utilities/0.log" Nov 25 10:15:43 crc kubenswrapper[4687]: I1125 10:15:43.189242 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rf9fh_92741ed4-1138-4ac2-ad4b-0c558b1b574d/extract-content/0.log" Nov 25 10:15:43 crc kubenswrapper[4687]: I1125 10:15:43.306464 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rf9fh_92741ed4-1138-4ac2-ad4b-0c558b1b574d/extract-content/0.log" Nov 25 10:15:43 crc kubenswrapper[4687]: I1125 10:15:43.309166 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rf9fh_92741ed4-1138-4ac2-ad4b-0c558b1b574d/extract-utilities/0.log" Nov 25 10:15:43 crc kubenswrapper[4687]: I1125 10:15:43.490397 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rf9fh_92741ed4-1138-4ac2-ad4b-0c558b1b574d/registry-server/0.log" Nov 25 10:15:43 crc kubenswrapper[4687]: I1125 10:15:43.509121 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ml5cs_d587d63c-f76c-47e0-8e59-ff79ca1d8390/extract-utilities/0.log" Nov 25 10:15:43 crc kubenswrapper[4687]: I1125 10:15:43.677610 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ml5cs_d587d63c-f76c-47e0-8e59-ff79ca1d8390/extract-content/0.log" Nov 25 10:15:43 crc kubenswrapper[4687]: I1125 10:15:43.692346 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ml5cs_d587d63c-f76c-47e0-8e59-ff79ca1d8390/extract-utilities/0.log" Nov 25 10:15:43 crc kubenswrapper[4687]: I1125 10:15:43.716046 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ml5cs_d587d63c-f76c-47e0-8e59-ff79ca1d8390/extract-content/0.log" Nov 25 10:15:43 crc kubenswrapper[4687]: I1125 10:15:43.941421 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ml5cs_d587d63c-f76c-47e0-8e59-ff79ca1d8390/extract-utilities/0.log" Nov 25 10:15:43 crc kubenswrapper[4687]: I1125 10:15:43.963163 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ml5cs_d587d63c-f76c-47e0-8e59-ff79ca1d8390/extract-content/0.log" Nov 25 10:15:44 crc kubenswrapper[4687]: I1125 10:15:44.472663 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ml5cs_d587d63c-f76c-47e0-8e59-ff79ca1d8390/registry-server/0.log" Nov 25 10:15:53 crc kubenswrapper[4687]: I1125 10:15:53.485319 4687 scope.go:117] "RemoveContainer" containerID="9a625a576a7b3e429db1c99886f52c13e6ffb43c9f74fd1ba78a9b708e3541b7" Nov 25 10:15:53 crc kubenswrapper[4687]: I1125 10:15:53.844756 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: 
Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:15:53 crc kubenswrapper[4687]: I1125 10:15:53.844831 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:15:53 crc kubenswrapper[4687]: I1125 10:15:53.844878 4687 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 10:15:53 crc kubenswrapper[4687]: I1125 10:15:53.845670 4687 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f6be9ff931585212d9effca07fbb25c8a5e8790fec54c8590b61dc256c888a77"} pod="openshift-machine-config-operator/machine-config-daemon-vcqct" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:15:53 crc kubenswrapper[4687]: I1125 10:15:53.845741 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" containerID="cri-o://f6be9ff931585212d9effca07fbb25c8a5e8790fec54c8590b61dc256c888a77" gracePeriod=600 Nov 25 10:15:54 crc kubenswrapper[4687]: I1125 10:15:54.090457 4687 generic.go:334] "Generic (PLEG): container finished" podID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerID="f6be9ff931585212d9effca07fbb25c8a5e8790fec54c8590b61dc256c888a77" exitCode=0 Nov 25 10:15:54 crc kubenswrapper[4687]: I1125 10:15:54.090492 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerDied","Data":"f6be9ff931585212d9effca07fbb25c8a5e8790fec54c8590b61dc256c888a77"} Nov 25 10:15:54 crc kubenswrapper[4687]: I1125 10:15:54.090584 4687 scope.go:117] "RemoveContainer" containerID="0212d6b95d85601e7dfe362244848d1b8fc9f753c4f5b9eb4e0eee3b25ecfef9" Nov 25 10:15:55 crc kubenswrapper[4687]: I1125 10:15:55.101365 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerStarted","Data":"e8517af667151b47b35b66d19e45a1aad54e141f8e6202cc81473569b9ed673b"} Nov 25 10:17:36 crc kubenswrapper[4687]: I1125 10:17:36.086642 4687 generic.go:334] "Generic (PLEG): container finished" podID="b5895325-0d62-4355-99f5-edf281f9a5c6" containerID="7303c7f1fde67059896078cea84ba9bbc4816856070991b935d70a4b7eb96c00" exitCode=0 Nov 25 10:17:36 crc kubenswrapper[4687]: I1125 10:17:36.086715 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pc7f9/must-gather-ltxcn" event={"ID":"b5895325-0d62-4355-99f5-edf281f9a5c6","Type":"ContainerDied","Data":"7303c7f1fde67059896078cea84ba9bbc4816856070991b935d70a4b7eb96c00"} Nov 25 10:17:36 crc kubenswrapper[4687]: I1125 10:17:36.088897 4687 scope.go:117] "RemoveContainer" containerID="7303c7f1fde67059896078cea84ba9bbc4816856070991b935d70a4b7eb96c00" Nov 25 10:17:36 crc kubenswrapper[4687]: I1125 10:17:36.436306 4687 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-must-gather-pc7f9_must-gather-ltxcn_b5895325-0d62-4355-99f5-edf281f9a5c6/gather/0.log" Nov 25 10:17:47 crc kubenswrapper[4687]: I1125 10:17:47.274938 4687 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pc7f9/must-gather-ltxcn"] Nov 25 10:17:47 crc kubenswrapper[4687]: I1125 10:17:47.275762 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-pc7f9/must-gather-ltxcn" podUID="b5895325-0d62-4355-99f5-edf281f9a5c6" containerName="copy" containerID="cri-o://de4fcc041542c6acb11dfa831b2bd2a8ea91e3f8abcfd3098fdfdb0b5f094fbc" gracePeriod=2 Nov 25 10:17:47 crc kubenswrapper[4687]: I1125 10:17:47.285183 4687 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pc7f9/must-gather-ltxcn"] Nov 25 10:17:47 crc kubenswrapper[4687]: I1125 10:17:47.772050 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-pc7f9_must-gather-ltxcn_b5895325-0d62-4355-99f5-edf281f9a5c6/copy/0.log" Nov 25 10:17:47 crc kubenswrapper[4687]: I1125 10:17:47.772758 4687 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pc7f9/must-gather-ltxcn" Nov 25 10:17:47 crc kubenswrapper[4687]: I1125 10:17:47.951175 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8jlvd\" (UniqueName: \"kubernetes.io/projected/b5895325-0d62-4355-99f5-edf281f9a5c6-kube-api-access-8jlvd\") pod \"b5895325-0d62-4355-99f5-edf281f9a5c6\" (UID: \"b5895325-0d62-4355-99f5-edf281f9a5c6\") " Nov 25 10:17:47 crc kubenswrapper[4687]: I1125 10:17:47.951270 4687 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b5895325-0d62-4355-99f5-edf281f9a5c6-must-gather-output\") pod \"b5895325-0d62-4355-99f5-edf281f9a5c6\" (UID: \"b5895325-0d62-4355-99f5-edf281f9a5c6\") " Nov 25 10:17:48 crc kubenswrapper[4687]: I1125 10:17:48.100616 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5895325-0d62-4355-99f5-edf281f9a5c6-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "b5895325-0d62-4355-99f5-edf281f9a5c6" (UID: "b5895325-0d62-4355-99f5-edf281f9a5c6"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:17:48 crc kubenswrapper[4687]: I1125 10:17:48.155544 4687 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b5895325-0d62-4355-99f5-edf281f9a5c6-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 25 10:17:48 crc kubenswrapper[4687]: I1125 10:17:48.200868 4687 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-pc7f9_must-gather-ltxcn_b5895325-0d62-4355-99f5-edf281f9a5c6/copy/0.log" Nov 25 10:17:48 crc kubenswrapper[4687]: I1125 10:17:48.205135 4687 generic.go:334] "Generic (PLEG): container finished" podID="b5895325-0d62-4355-99f5-edf281f9a5c6" containerID="de4fcc041542c6acb11dfa831b2bd2a8ea91e3f8abcfd3098fdfdb0b5f094fbc" exitCode=143 Nov 25 10:17:48 crc kubenswrapper[4687]: I1125 10:17:48.205223 4687 scope.go:117] "RemoveContainer" containerID="de4fcc041542c6acb11dfa831b2bd2a8ea91e3f8abcfd3098fdfdb0b5f094fbc" Nov 25 10:17:48 crc kubenswrapper[4687]: I1125 10:17:48.205470 4687 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pc7f9/must-gather-ltxcn" Nov 25 10:17:48 crc kubenswrapper[4687]: I1125 10:17:48.238305 4687 scope.go:117] "RemoveContainer" containerID="7303c7f1fde67059896078cea84ba9bbc4816856070991b935d70a4b7eb96c00" Nov 25 10:17:48 crc kubenswrapper[4687]: I1125 10:17:48.519151 4687 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5895325-0d62-4355-99f5-edf281f9a5c6-kube-api-access-8jlvd" (OuterVolumeSpecName: "kube-api-access-8jlvd") pod "b5895325-0d62-4355-99f5-edf281f9a5c6" (UID: "b5895325-0d62-4355-99f5-edf281f9a5c6"). InnerVolumeSpecName "kube-api-access-8jlvd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:17:48 crc kubenswrapper[4687]: I1125 10:17:48.555428 4687 scope.go:117] "RemoveContainer" containerID="de4fcc041542c6acb11dfa831b2bd2a8ea91e3f8abcfd3098fdfdb0b5f094fbc" Nov 25 10:17:48 crc kubenswrapper[4687]: E1125 10:17:48.555883 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de4fcc041542c6acb11dfa831b2bd2a8ea91e3f8abcfd3098fdfdb0b5f094fbc\": container with ID starting with de4fcc041542c6acb11dfa831b2bd2a8ea91e3f8abcfd3098fdfdb0b5f094fbc not found: ID does not exist" containerID="de4fcc041542c6acb11dfa831b2bd2a8ea91e3f8abcfd3098fdfdb0b5f094fbc" Nov 25 10:17:48 crc kubenswrapper[4687]: I1125 10:17:48.555916 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de4fcc041542c6acb11dfa831b2bd2a8ea91e3f8abcfd3098fdfdb0b5f094fbc"} err="failed to get container status \"de4fcc041542c6acb11dfa831b2bd2a8ea91e3f8abcfd3098fdfdb0b5f094fbc\": rpc error: code = NotFound desc = could not find container \"de4fcc041542c6acb11dfa831b2bd2a8ea91e3f8abcfd3098fdfdb0b5f094fbc\": container with ID starting with de4fcc041542c6acb11dfa831b2bd2a8ea91e3f8abcfd3098fdfdb0b5f094fbc not found: ID does not exist" Nov 25 10:17:48 crc kubenswrapper[4687]: I1125 10:17:48.555942 4687 scope.go:117] "RemoveContainer" containerID="7303c7f1fde67059896078cea84ba9bbc4816856070991b935d70a4b7eb96c00" Nov 25 10:17:48 crc kubenswrapper[4687]: E1125 10:17:48.556185 4687 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7303c7f1fde67059896078cea84ba9bbc4816856070991b935d70a4b7eb96c00\": container with ID starting with 7303c7f1fde67059896078cea84ba9bbc4816856070991b935d70a4b7eb96c00 not found: ID does not exist" containerID="7303c7f1fde67059896078cea84ba9bbc4816856070991b935d70a4b7eb96c00" Nov 25 10:17:48 crc kubenswrapper[4687]: I1125 10:17:48.556217 4687 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7303c7f1fde67059896078cea84ba9bbc4816856070991b935d70a4b7eb96c00"} err="failed to get container status \"7303c7f1fde67059896078cea84ba9bbc4816856070991b935d70a4b7eb96c00\": rpc error: code = NotFound desc = could not find container \"7303c7f1fde67059896078cea84ba9bbc4816856070991b935d70a4b7eb96c00\": container with ID starting with 7303c7f1fde67059896078cea84ba9bbc4816856070991b935d70a4b7eb96c00 not found: ID does not exist" Nov 25 10:17:48 crc kubenswrapper[4687]: I1125 10:17:48.563802 4687 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8jlvd\" (UniqueName: \"kubernetes.io/projected/b5895325-0d62-4355-99f5-edf281f9a5c6-kube-api-access-8jlvd\") on node \"crc\" DevicePath \"\"" Nov 25 10:17:48 crc kubenswrapper[4687]: E1125 10:17:48.945189 4687 
cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb5895325_0d62_4355_99f5_edf281f9a5c6.slice\": RecentStats: unable to find data in memory cache]" Nov 25 10:17:49 crc kubenswrapper[4687]: I1125 10:17:49.746271 4687 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5895325-0d62-4355-99f5-edf281f9a5c6" path="/var/lib/kubelet/pods/b5895325-0d62-4355-99f5-edf281f9a5c6/volumes" Nov 25 10:18:23 crc kubenswrapper[4687]: I1125 10:18:23.845441 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:18:23 crc kubenswrapper[4687]: I1125 10:18:23.846085 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:18:53 crc kubenswrapper[4687]: I1125 10:18:53.643476 4687 scope.go:117] "RemoveContainer" containerID="6e410b68cf05abcafac96f0e30a1e71d4170343f5b8bd5acd671cacf35176bfe" Nov 25 10:18:53 crc kubenswrapper[4687]: I1125 10:18:53.845017 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:18:53 crc kubenswrapper[4687]: I1125 10:18:53.845366 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:19:23 crc kubenswrapper[4687]: I1125 10:19:23.844109 4687 patch_prober.go:28] interesting pod/machine-config-daemon-vcqct container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:19:23 crc kubenswrapper[4687]: I1125 10:19:23.844732 4687 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:19:23 crc kubenswrapper[4687]: I1125 10:19:23.844773 4687 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" Nov 25 10:19:23 crc kubenswrapper[4687]: I1125 10:19:23.845537 4687 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e8517af667151b47b35b66d19e45a1aad54e141f8e6202cc81473569b9ed673b"} pod="openshift-machine-config-operator/machine-config-daemon-vcqct" containerMessage="Container machine-config-daemon failed 
liveness probe, will be restarted" Nov 25 10:19:23 crc kubenswrapper[4687]: I1125 10:19:23.845582 4687 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerName="machine-config-daemon" containerID="cri-o://e8517af667151b47b35b66d19e45a1aad54e141f8e6202cc81473569b9ed673b" gracePeriod=600 Nov 25 10:19:23 crc kubenswrapper[4687]: E1125 10:19:23.998202 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:19:24 crc kubenswrapper[4687]: I1125 10:19:24.067163 4687 generic.go:334] "Generic (PLEG): container finished" podID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" containerID="e8517af667151b47b35b66d19e45a1aad54e141f8e6202cc81473569b9ed673b" exitCode=0 Nov 25 10:19:24 crc kubenswrapper[4687]: I1125 10:19:24.067211 4687 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" event={"ID":"ac5dd7d0-d24d-411e-a7d0-3e921f218f4c","Type":"ContainerDied","Data":"e8517af667151b47b35b66d19e45a1aad54e141f8e6202cc81473569b9ed673b"} Nov 25 10:19:24 crc kubenswrapper[4687]: I1125 10:19:24.067249 4687 scope.go:117] "RemoveContainer" containerID="f6be9ff931585212d9effca07fbb25c8a5e8790fec54c8590b61dc256c888a77" Nov 25 10:19:24 crc kubenswrapper[4687]: I1125 10:19:24.067943 4687 scope.go:117] "RemoveContainer" containerID="e8517af667151b47b35b66d19e45a1aad54e141f8e6202cc81473569b9ed673b" Nov 25 10:19:24 crc kubenswrapper[4687]: E1125 10:19:24.068213 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:19:37 crc kubenswrapper[4687]: I1125 10:19:37.736457 4687 scope.go:117] "RemoveContainer" containerID="e8517af667151b47b35b66d19e45a1aad54e141f8e6202cc81473569b9ed673b" Nov 25 10:19:37 crc kubenswrapper[4687]: E1125 10:19:37.737548 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:19:50 crc kubenswrapper[4687]: I1125 10:19:50.734846 4687 scope.go:117] "RemoveContainer" containerID="e8517af667151b47b35b66d19e45a1aad54e141f8e6202cc81473569b9ed673b" Nov 25 10:19:50 crc kubenswrapper[4687]: E1125 10:19:50.735812 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:19:53 crc kubenswrapper[4687]: I1125 10:19:53.698526 4687 scope.go:117] "RemoveContainer" containerID="af6e9841c09afc2604eb05243457e3779124144efc506467599804a48965414d" Nov 25 10:20:03 crc kubenswrapper[4687]: I1125 10:20:03.735513 4687 scope.go:117] "RemoveContainer" containerID="e8517af667151b47b35b66d19e45a1aad54e141f8e6202cc81473569b9ed673b" Nov 25 10:20:03 crc kubenswrapper[4687]: E1125 10:20:03.736493 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:20:18 crc kubenswrapper[4687]: I1125 10:20:18.735550 4687 scope.go:117] "RemoveContainer" containerID="e8517af667151b47b35b66d19e45a1aad54e141f8e6202cc81473569b9ed673b" Nov 25 10:20:18 crc kubenswrapper[4687]: E1125 10:20:18.736357 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:20:30 crc kubenswrapper[4687]: I1125 10:20:30.735016 4687 scope.go:117] "RemoveContainer" containerID="e8517af667151b47b35b66d19e45a1aad54e141f8e6202cc81473569b9ed673b" Nov 25 10:20:30 crc kubenswrapper[4687]: E1125 10:20:30.735820 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:20:45 crc kubenswrapper[4687]: I1125 10:20:45.743983 4687 scope.go:117] "RemoveContainer" containerID="e8517af667151b47b35b66d19e45a1aad54e141f8e6202cc81473569b9ed673b" Nov 25 10:20:45 crc kubenswrapper[4687]: E1125 10:20:45.745403 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:20:59 crc kubenswrapper[4687]: I1125 10:20:59.734695 4687 scope.go:117] "RemoveContainer" containerID="e8517af667151b47b35b66d19e45a1aad54e141f8e6202cc81473569b9ed673b" Nov 25 10:20:59 crc kubenswrapper[4687]: E1125 10:20:59.735479 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:21:14 crc kubenswrapper[4687]: I1125 10:21:14.734893 4687 scope.go:117] "RemoveContainer" containerID="e8517af667151b47b35b66d19e45a1aad54e141f8e6202cc81473569b9ed673b" Nov 25 10:21:14 crc kubenswrapper[4687]: E1125 10:21:14.735704 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" Nov 25 10:21:29 crc kubenswrapper[4687]: I1125 10:21:29.734837 4687 scope.go:117] "RemoveContainer" containerID="e8517af667151b47b35b66d19e45a1aad54e141f8e6202cc81473569b9ed673b" Nov 25 10:21:29 crc kubenswrapper[4687]: E1125 10:21:29.737191 4687 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vcqct_openshift-machine-config-operator(ac5dd7d0-d24d-411e-a7d0-3e921f218f4c)\"" pod="openshift-machine-config-operator/machine-config-daemon-vcqct" podUID="ac5dd7d0-d24d-411e-a7d0-3e921f218f4c" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515111301466024443 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015111301467017361 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015111270010016471 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015111270010015441 5ustar corecore